Commit 4b0b946019e7 for kernel

commit 4b0b946019e7376752456380b67e54eea2f10a7c
Merge: a5d1079c28a5 9091e3b59f2b
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date:   Mon Apr 20 11:20:35 2026 -0700

    Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

    Pull rdma updates from Jason Gunthorpe:
     "The usual collection of driver changes, more core infrastructure
      updates than typical this cycle:

       - Minor cleanups and kernel-doc fixes in bnxt_re, hns, rdmavt, efa,
         ocrdma, erdma, rtrs, hfi1, ionic, and pvrdma

       - New udata validation framework and driver updates

       - Modernize CQ creation interface in mlx4 and mlx5, manage CQ umem in
         core

       - Promote UMEM to a core component, split out DMA block iterator
         logic

       - Introduce FRMR pools with aging, statistics, pinned handles, and
         netlink control and use it in mlx5

       - Add PCIe TLP emulation support in mlx5

       - Extend umem to work with revocable pinned dmabufs and use it in
         irdma

       - More net namespace improvements for rxe

       - GEN4 hardware support in irdma

       - First steps to MW and UC support in mana_ib

       - Support for CQ umem and doorbells in bnxt_re

       - Drop opa_vnic driver from hfi1

      Fixes:

       - IB/core zero dmac neighbor resolution race

       - GID table memory free

       - rxe pad/ICRC validation and r_key async errors

       - mlx4 external umem for CQ

       - umem DMA attributes on unmap

       - mana_ib RX steering on RSS QP destroy"

    * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (116 commits)
      RDMA/core: Fix user CQ creation for drivers without create_cq
      RDMA/ionic: bound node_desc sysfs read with %.64s
      IB/core: Fix zero dmac race in neighbor resolution
      RDMA/mana_ib: Support memory windows
      RDMA/rxe: Validate pad and ICRC before payload_size() in rxe_rcv
      RDMA/core: Prefer NLA_NUL_STRING
      RDMA/core: Fix memory free for GID table
      RDMA/hns: Remove the duplicate calls to ib_copy_validate_udata_in()
      RDMA: Remove redundant = {} for udata req structs
      RDMA/irdma: Add missing comp_mask check in alloc_ucontext
      RDMA/hns: Add missing comp_mask check in create_qp
      RDMA/mlx5: Pull comp_mask validation into ib_copy_validate_udata_in_cm()
      RDMA: Use ib_copy_validate_udata_in_cm() for zero comp_mask
      RDMA/hns: Use ib_copy_validate_udata_in()
      RDMA/mlx4: Use ib_copy_validate_udata_in() for QP
      RDMA/mlx4: Use ib_copy_validate_udata_in()
      RDMA/mlx5: Use ib_copy_validate_udata_in() for MW
      RDMA/mlx5: Use ib_copy_validate_udata_in() for SRQ
      RDMA/pvrdma: Use ib_copy_validate_udata_in() for srq
      RDMA: Use ib_copy_validate_udata_in() for implicit full structs
      ...

diff --cc drivers/infiniband/core/umem_dmabuf.c
index d7e1d2adb6e9,9deded3d58b5..ad023c2d84d8
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@@ -181,15 -181,51 +181,41 @@@ struct ib_umem_dmabuf *ib_umem_dmabuf_g
  }
  EXPORT_SYMBOL(ib_umem_dmabuf_get);

 -static void
 -ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
 -{
 -	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
 -
 -	ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
 -			       "Invalidate callback should not be called when memory is pinned\n");
 -}
 -
  static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
  	.allow_peer2peer = true,
 -	.move_notify = ib_umem_dmabuf_unsupported_move_notify,
  };

- struct ib_umem_dmabuf *
- ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
- 					  struct device *dma_device,
- 					  unsigned long offset, size_t size,
- 					  int fd, int access)
+ static void ib_umem_dmabuf_revoke_locked(struct dma_buf_attachment *attach)
+ {
+ 	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
+
+ 	dma_resv_assert_held(attach->dmabuf->resv);
+
+ 	if (umem_dmabuf->revoked)
+ 		return;
+
+ 	if (umem_dmabuf->pinned_revoke)
+ 		umem_dmabuf->pinned_revoke(umem_dmabuf->private);
+
+ 	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ 	if (umem_dmabuf->pinned) {
+ 		dma_buf_unpin(umem_dmabuf->attach);
+ 		umem_dmabuf->pinned = 0;
+ 	}
+ 	umem_dmabuf->revoked = 1;
+ }
+
+ static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_revocable_ops = {
+ 	.allow_peer2peer = true,
 -	.move_notify = ib_umem_dmabuf_revoke_locked,
++	.invalidate_mappings = ib_umem_dmabuf_revoke_locked,
+ };
+
+ static struct ib_umem_dmabuf *
+ ib_umem_dmabuf_get_pinned_and_lock(struct ib_device *device,
+ 				   struct device *dma_device,
+ 				   unsigned long offset,
+ 				   size_t size, int fd, int access,
+ 				   const struct dma_buf_attach_ops *ops)
  {
  	struct ib_umem_dmabuf *umem_dmabuf;
  	int err;
diff --cc drivers/infiniband/hw/mlx5/mr.c
index cf26c3f3f09a,3ef467ac9e3d..3b6da45061a5
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@@ -46,14 -46,15 +46,13 @@@
  #include "data_direct.h"
  #include "dmah.h"

- enum {
- 	MAX_PENDING_REG_MR = 8,
- };
 -#define MLX5_UMR_ALIGN 2048
--
- #define MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS 4
+ static int mkey_max_umr_order(struct mlx5_ib_dev *dev)
+ {
+ 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+ 		return MLX5_MAX_UMR_EXTENDED_SHIFT;
+ 	return MLX5_MAX_UMR_SHIFT;
+ }

- static void
- create_mkey_callback(int status, struct mlx5_async_work *context);
  static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
  				     u64 iova, int access_flags,
  				     unsigned long page_size, bool populate,
diff --cc drivers/infiniband/sw/rxe/rxe_net.c
index cbc646a30003,211bd3000acc..50a2cb5405e2
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@@ -138,9 -141,9 +141,7 @@@ static struct dst_entry *rxe_find_route
  	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
  	fl6.flowi6_proto = IPPROTO_UDP;

- 	ndst = ip6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
- 				   recv_sockets.sk6->sk, &fl6,
- 				   NULL);
 -	ndst = ipv6_stub->ipv6_dst_lookup_flow(net,
 -					       rxe_ns_pernet_sk6(net), &fl6,
 -					       NULL);
++	ndst = ip6_dst_lookup_flow(net, rxe_ns_pernet_sk6(net), &fl6, NULL);
  	if (IS_ERR(ndst)) {
  		rxe_dbg_qp(qp, "no route to %pI6\n", daddr);
  		return NULL;