Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "Usual wide collection of unrelated items in drivers:

   - Driver bug fixes and treewide cleanups in hfi1, siw, qib, mlx5,
     rxe, usnic, bnxt_re, ocrdma, iser:
       - remove unnecessary NULL checks
       - kmap obsolescence
       - pci_enable_pcie_error_reporting() obsolescence
       - unused variables and macros
       - trace event related warnings
       - casting warnings

   - Code cleanups for irdma and erdma

   - EFA reporting of 128 byte PCIe TLP support

   - mlx5 more aggressively uses the out-of-order HW feature

   - Big rework of how state machines and tasks work in rxe

   - Fix a syzkaller-found netdev refcount leak crash in siw

   - bnxt_re revises its HW description header

   - Congestion control for bnxt_re

   - Use mmu_notifiers more safely in hfi1

   - mlx5 gets better support for PCIe relaxed ordering inside VMs"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (81 commits)
  RDMA/efa: Add rdma write capability to device caps
  RDMA/mlx5: Use correct device num_ports when modify DC
  RDMA/irdma: Drop spurious WQ_UNBOUND from alloc_ordered_workqueue() call
  RDMA/rxe: Fix spinlock recursion deadlock on requester
  RDMA/mlx5: Fix flow counter query via DEVX
  RDMA/rxe: Protect QP state with qp->state_lock
  RDMA/rxe: Move code to check if drained to subroutine
  RDMA/rxe: Remove qp->req.state
  RDMA/rxe: Remove qp->comp.state
  RDMA/rxe: Remove qp->resp.state
  RDMA/mlx5: Allow relaxed ordering read in VFs and VMs
  net/mlx5: Update relaxed ordering read HCA capabilities
  RDMA/mlx5: Check pcie_relaxed_ordering_enabled() in UMR
  RDMA/mlx5: Remove pcie_relaxed_ordering_enabled() check for RO write
  RDMA: Add ib_virt_dma_to_page()
  RDMA/rxe: Fix the error "trying to register non-static key in rxe_cleanup_task"
  RDMA/irdma: Slightly optimize irdma_form_ah_cm_frame()
  RDMA/rxe: Fix incorrect TASKLET_STATE_SCHED check in rxe_task.c
  IB/hfi1: Place struct mmu_rb_handler on cache line start
  IB/hfi1: Fix bugs with non-PAGE_SIZE-end multi-iovec user SDMA requests
  ...
torvalds committed Apr 30, 2023
2 parents 1ae78a1 + 531094d commit af38772
Showing 106 changed files with 8,007 additions and 5,519 deletions.
drivers/infiniband/core/cm.c (3 changes: 2 additions & 1 deletion)
@@ -2912,6 +2912,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
 		return -EINVAL;
 
+	trace_icm_send_rej(&cm_id_priv->id, reason);
+
 	switch (state) {
 	case IB_CM_REQ_SENT:
 	case IB_CM_MRA_REQ_RCVD:
@@ -2942,7 +2944,6 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 		return -EINVAL;
 	}
 
-	trace_icm_send_rej(&cm_id_priv->id, reason);
 	ret = ib_post_send_mad(msg, NULL);
 	if (ret) {
 		cm_free_msg(msg);
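
Note on the cm.c hunks: the tracepoint moves ahead of the switch because several of its arms appear to transition the CM state (e.g. via cm_reset_to_idle()) before the MAD is posted, so firing it afterwards logged the post-transition state. A sketch of the fixed ordering, illustrative only:

	/* Trace while the pre-REJ state is still intact. */
	trace_icm_send_rej(&cm_id_priv->id, reason);
	switch (state) {
		/* ... arms may reset cm_id state before the send ... */
	}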
drivers/infiniband/core/cma.c (6 changes: 2 additions & 4 deletions)
@@ -709,8 +709,7 @@ cma_validate_port(struct ib_device *device, u32 port,
 	}
 
 	sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
-	if (ndev)
-		dev_put(ndev);
+	dev_put(ndev);
 	return sgid_attr;
 }
 
@@ -2429,8 +2428,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
 	mutex_unlock(&listen_id->handler_mutex);
 
 net_dev_put:
-	if (net_dev)
-		dev_put(net_dev);
+	dev_put(net_dev);
 
 	return ret;
 }
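
Note on the cma.c hunks: both rely on dev_put() tolerating a NULL argument (like kfree(), it is a no-op on NULL in current kernels), which makes the caller-side "if (ndev)" guards redundant. A simplified sketch of the assumed behavior, not the exact kernel implementation:

	/* Illustrative only: modern dev_put() checks its argument itself. */
	static inline void dev_put(struct net_device *dev)
	{
		if (dev)				/* silently ignore NULL */
			this_cpu_dec(*dev->pcpu_refcnt);	/* drop one reference */
	}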
drivers/infiniband/core/user_mad.c (23 changes: 14 additions & 9 deletions)
@@ -131,6 +131,11 @@ struct ib_umad_packet {
 	struct ib_user_mad mad;
 };
 
+struct ib_rmpp_mad_hdr {
+	struct ib_mad_hdr	mad_hdr;
+	struct ib_rmpp_hdr	rmpp_hdr;
+} __packed;
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/ib_umad.h>
 
@@ -494,19 +499,19 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 			     size_t count, loff_t *pos)
 {
 	struct ib_umad_file *file = filp->private_data;
+	struct ib_rmpp_mad_hdr *rmpp_mad_hdr;
 	struct ib_umad_packet *packet;
 	struct ib_mad_agent *agent;
 	struct rdma_ah_attr ah_attr;
 	struct ib_ah *ah;
-	struct ib_rmpp_mad *rmpp_mad;
 	__be64 *tid;
 	int ret, data_len, hdr_len, copy_offset, rmpp_active;
 	u8 base_version;
 
 	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
 		return -EINVAL;
 
-	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
+	packet = kzalloc(sizeof(*packet) + IB_MGMT_RMPP_HDR, GFP_KERNEL);
 	if (!packet)
 		return -ENOMEM;
 
@@ -560,13 +565,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		goto err_up;
 	}
 
-	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
-	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
+	rmpp_mad_hdr = (struct ib_rmpp_mad_hdr *)packet->mad.data;
+	hdr_len = ib_get_mad_data_offset(rmpp_mad_hdr->mad_hdr.mgmt_class);
 
-	if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
+	if (ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
 	    && ib_mad_kernel_rmpp_agent(agent)) {
 		copy_offset = IB_MGMT_RMPP_HDR;
-		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+		rmpp_active = ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) &
 			IB_MGMT_RMPP_FLAG_ACTIVE;
 	} else {
 		copy_offset = IB_MGMT_MAD_HDR;
@@ -615,12 +620,12 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
 		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
 				   (be64_to_cpup(tid) & 0xffffffff));
-		rmpp_mad->mad_hdr.tid = *tid;
+		rmpp_mad_hdr->mad_hdr.tid = *tid;
 	}
 
 	if (!ib_mad_kernel_rmpp_agent(agent)
-	    && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
-	    && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
+	    && ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
+	    && (ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
 		spin_lock_irq(&file->send_lock);
 		list_add_tail(&packet->list, &file->send_list);
 		spin_unlock_irq(&file->send_lock);
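
Note on the user_mad.c change: ib_umad_write() validates only hdr_size(file) + IB_MGMT_RMPP_HDR bytes before parsing, while struct ib_rmpp_mad also carries a large data[] payload beyond the two headers; casting the buffer to the new header-only struct keeps every access within the validated region. A compile-time check of that size assumption (a sketch, not part of the patch):

	/* IB_MGMT_RMPP_HDR should equal the common MAD header (24 bytes)
	 * plus the RMPP header (12 bytes) that the new struct wraps. */
	static_assert(sizeof(struct ib_rmpp_mad_hdr) == IB_MGMT_RMPP_HDR,
		      "header-only view must match the validated byte count");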
drivers/infiniband/hw/bnxt_re/ib_verbs.c (109 changes: 109 additions & 0 deletions)
@@ -2912,6 +2912,106 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	return rc;
 }
 
+static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
+{
+	struct bnxt_re_dev *rdev = cq->rdev;
+
+	bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
+
+	cq->qplib_cq.max_wqe = cq->resize_cqe;
+	if (cq->resize_umem) {
+		ib_umem_release(cq->umem);
+		cq->umem = cq->resize_umem;
+		cq->resize_umem = NULL;
+		cq->resize_cqe = 0;
+	}
+}
+
+int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+{
+	struct bnxt_qplib_sg_info sg_info = {};
+	struct bnxt_qplib_dpi *orig_dpi = NULL;
+	struct bnxt_qplib_dev_attr *dev_attr;
+	struct bnxt_re_ucontext *uctx = NULL;
+	struct bnxt_re_resize_cq_req req;
+	struct bnxt_re_dev *rdev;
+	struct bnxt_re_cq *cq;
+	int rc, entries;
+
+	cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
+	rdev = cq->rdev;
+	dev_attr = &rdev->dev_attr;
+	if (!ibcq->uobject) {
+		ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
+		return -EOPNOTSUPP;
+	}
+
+	if (cq->resize_umem) {
+		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
+			  cq->qplib_cq.id);
+		return -EBUSY;
+	}
+
+	/* Check the requested cq depth out of supported depth */
+	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
+		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
+			  cq->qplib_cq.id, cqe);
+		return -EINVAL;
+	}
+
+	entries = roundup_pow_of_two(cqe + 1);
+	if (entries > dev_attr->max_cq_wqes + 1)
+		entries = dev_attr->max_cq_wqes + 1;
+
+	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
+					 ib_uctx);
+	/* uverbs consumer */
+	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
+		rc = -EFAULT;
+		goto fail;
+	}
+
+	cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
+				      entries * sizeof(struct cq_base),
+				      IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(cq->resize_umem)) {
+		rc = PTR_ERR(cq->resize_umem);
+		cq->resize_umem = NULL;
+		ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
+			  __func__, rc);
+		goto fail;
+	}
+	cq->resize_cqe = entries;
+	memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
+	orig_dpi = cq->qplib_cq.dpi;
+
+	cq->qplib_cq.sg_info.umem = cq->resize_umem;
+	cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
+	cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
+	cq->qplib_cq.dpi = &uctx->dpi;
+
+	rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
+	if (rc) {
+		ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
+			  cq->qplib_cq.id);
+		goto fail;
+	}
+
+	cq->ib_cq.cqe = cq->resize_cqe;
+
+	return 0;
+
+fail:
+	if (cq->resize_umem) {
+		ib_umem_release(cq->resize_umem);
+		cq->resize_umem = NULL;
+		cq->resize_cqe = 0;
+		memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
+		cq->qplib_cq.dpi = orig_dpi;
+	}
+	return rc;
+}
+
 static u8 __req_to_ib_wc_status(u8 qstatus)
 {
 	switch (qstatus) {
@@ -3425,6 +3525,15 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
 	unsigned long flags;
 
+	/* User CQ; the only processing we do is to
+	 * complete any pending CQ resize operation.
+	 */
+	if (cq->umem) {
+		if (cq->resize_umem)
+			bnxt_re_resize_cq_complete(cq);
+		return 0;
+	}
+
 	spin_lock_irqsave(&cq->cq_lock, flags);
 	budget = min_t(u32, num_entries, cq->max_cql);
 	num_entries = budget;
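
Note on the new verb: bnxt_re_resize_cq() pins the replacement user buffer with ib_umem_get(), asks the firmware to resize, and finishes the switch-over in bnxt_re_poll_cq() on the next poll. It is reached from userspace through the standard libibverbs resize call; a minimal consumer sketch, assuming a CQ already created on a bnxt_re device:

	#include <stdio.h>
	#include <infiniband/verbs.h>

	/* Grow an existing completion queue; 'cq' comes from ibv_create_cq(). */
	static int grow_cq(struct ibv_cq *cq, int new_depth)
	{
		/* The driver rounds the depth up to a power of two and
		 * rejects requests beyond the device's max_cq_wqes. */
		int rc = ibv_resize_cq(cq, new_depth);

		if (rc)
			fprintf(stderr, "ibv_resize_cq: %d\n", rc);
		return rc;
	}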
drivers/infiniband/hw/bnxt_re/ib_verbs.h (3 changes: 3 additions & 0 deletions)
@@ -104,6 +104,8 @@ struct bnxt_re_cq {
 #define MAX_CQL_PER_POLL	1024
 	u32			max_cql;
 	struct ib_umem		*umem;
+	struct ib_umem		*resize_umem;
+	int			resize_cqe;
 };
 
 struct bnxt_re_mr {
@@ -191,6 +193,7 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
 		      const struct ib_recv_wr **bad_recv_wr);
 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		      struct ib_udata *udata);
+int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
 int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
 int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);