diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 603c0aecc36144..ff58058aeadca7 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -2912,6 +2912,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
 		return -EINVAL;

+	trace_icm_send_rej(&cm_id_priv->id, reason);
+
 	switch (state) {
 	case IB_CM_REQ_SENT:
 	case IB_CM_MRA_REQ_RCVD:
@@ -2942,7 +2944,6 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 		return -EINVAL;
 	}

-	trace_icm_send_rej(&cm_id_priv->id, reason);
 	ret = ib_post_send_mad(msg, NULL);
 	if (ret) {
 		cm_free_msg(msg);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6b9563d4f23c94..93a1c48d0c3281 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -709,8 +709,7 @@ cma_validate_port(struct ib_device *device, u32 port,
 	}

 	sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
-	if (ndev)
-		dev_put(ndev);
+	dev_put(ndev);
 	return sgid_attr;
 }

@@ -2429,8 +2428,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
 	mutex_unlock(&listen_id->handler_mutex);

 net_dev_put:
-	if (net_dev)
-		dev_put(net_dev);
+	dev_put(net_dev);

 	return ret;
 }
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 0e9e04f8c6858f..7e5c33aad1619d 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -131,6 +131,11 @@ struct ib_umad_packet {
 	struct ib_user_mad mad;
 };

+struct ib_rmpp_mad_hdr {
+	struct ib_mad_hdr mad_hdr;
+	struct ib_rmpp_hdr rmpp_hdr;
+} __packed;
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/ib_umad.h>

@@ -494,11 +499,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 			     size_t count, loff_t *pos)
 {
 	struct ib_umad_file *file = filp->private_data;
+	struct ib_rmpp_mad_hdr *rmpp_mad_hdr;
 	struct ib_umad_packet *packet;
 	struct ib_mad_agent *agent;
 	struct rdma_ah_attr ah_attr;
 	struct ib_ah *ah;
-	struct ib_rmpp_mad *rmpp_mad;
 	__be64 *tid;
 	int ret, data_len, hdr_len, copy_offset, rmpp_active;
 	u8 base_version;
@@ -506,7 +511,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
 		return -EINVAL;

-	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
+	packet = kzalloc(sizeof(*packet) + IB_MGMT_RMPP_HDR, GFP_KERNEL);
 	if (!packet)
 		return -ENOMEM;

@@ -560,13 +565,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		goto err_up;
 	}

-	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
-	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
+	rmpp_mad_hdr = (struct ib_rmpp_mad_hdr *)packet->mad.data;
+	hdr_len = ib_get_mad_data_offset(rmpp_mad_hdr->mad_hdr.mgmt_class);

-	if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
+	if (ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
 	    && ib_mad_kernel_rmpp_agent(agent)) {
 		copy_offset = IB_MGMT_RMPP_HDR;
-		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+		rmpp_active = ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) &
 			      IB_MGMT_RMPP_FLAG_ACTIVE;
 	} else {
 		copy_offset = IB_MGMT_MAD_HDR;
@@ -615,12 +620,12 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
 		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
 				   (be64_to_cpup(tid) & 0xffffffff));
-		rmpp_mad->mad_hdr.tid = *tid;
+		rmpp_mad_hdr->mad_hdr.tid = *tid;
 	}

 	if (!ib_mad_kernel_rmpp_agent(agent)
-	    && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
-	    && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
+	    && ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
+	    && (ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
 		spin_lock_irq(&file->send_lock);
 		list_add_tail(&packet->list, &file->send_list);
 		spin_unlock_irq(&file->send_lock);
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 989edc78963381..e86afecfbe46e6 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -2912,6 +2912,106 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	return rc;
 }

+static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
+{
+	struct bnxt_re_dev *rdev = cq->rdev;
+
+	bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
+
+	cq->qplib_cq.max_wqe = cq->resize_cqe;
+	if (cq->resize_umem) {
+		ib_umem_release(cq->umem);
+		cq->umem = cq->resize_umem;
+		cq->resize_umem = NULL;
+		cq->resize_cqe = 0;
+	}
+}
+
+int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+{
+	struct bnxt_qplib_sg_info sg_info = {};
+	struct bnxt_qplib_dpi *orig_dpi = NULL;
+	struct bnxt_qplib_dev_attr *dev_attr;
+	struct bnxt_re_ucontext *uctx = NULL;
+	struct bnxt_re_resize_cq_req req;
+	struct bnxt_re_dev *rdev;
+	struct bnxt_re_cq *cq;
+	int rc, entries;
+
+	cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
+	rdev = cq->rdev;
+	dev_attr = &rdev->dev_attr;
+	if (!ibcq->uobject) {
+		ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
+		return -EOPNOTSUPP;
+	}
+
+	if (cq->resize_umem) {
+		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
+			  cq->qplib_cq.id);
+		return -EBUSY;
+	}
+
+	/* Check the requested cq depth out of supported depth */
+	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
+		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
+			  cq->qplib_cq.id, cqe);
+		return -EINVAL;
+	}
+
+	entries = roundup_pow_of_two(cqe + 1);
+	if (entries > dev_attr->max_cq_wqes + 1)
+		entries = dev_attr->max_cq_wqes + 1;
+
+	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
+					 ib_uctx);
+	/* uverbs consumer */
+	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
+		rc = -EFAULT;
+		goto fail;
+	}
+
+	cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
+				      entries * sizeof(struct cq_base),
+				      IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(cq->resize_umem)) {
+		rc = PTR_ERR(cq->resize_umem);
+		cq->resize_umem = NULL;
+		ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
+			  __func__, rc);
+		goto fail;
+	}
+	cq->resize_cqe = entries;
+	memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
+	orig_dpi = cq->qplib_cq.dpi;
+
+	cq->qplib_cq.sg_info.umem = cq->resize_umem;
+	cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
+	cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
+	cq->qplib_cq.dpi = &uctx->dpi;
+
+	rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
+	if (rc) {
+		ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
+			  cq->qplib_cq.id);
+		goto fail;
+	}
+
+	cq->ib_cq.cqe = cq->resize_cqe;
+
+	return 0;
+
+fail:
+	if (cq->resize_umem) {
+		ib_umem_release(cq->resize_umem);
+		cq->resize_umem = NULL;
+		cq->resize_cqe = 0;
+		memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
+		cq->qplib_cq.dpi = orig_dpi;
+	}
+	return rc;
+}
+
 static u8 __req_to_ib_wc_status(u8 qstatus)
 {
 	switch (qstatus) {
@@ -3425,6 +3525,15 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
 	unsigned long flags;

+	/* User CQ; the only processing we do is to
+	 * complete any pending CQ resize operation.
+	 */
+	if (cq->umem) {
+		if (cq->resize_umem)
+			bnxt_re_resize_cq_complete(cq);
+		return 0;
+	}
+
 	spin_lock_irqsave(&cq->cq_lock, flags);
 	budget = min_t(u32, num_entries, cq->max_cql);
 	num_entries = budget;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 94326267f9bb7c..31f7e34040f79d 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -104,6 +104,8 @@ struct bnxt_re_cq {
 #define MAX_CQL_PER_POLL 1024
 	u32 max_cql;
 	struct ib_umem *umem;
+	struct ib_umem *resize_umem;
+	int resize_cqe;
 };

 struct bnxt_re_mr {
@@ -191,6 +193,7 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
 		      const struct ib_recv_wr **bad_recv_wr);
 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		      struct ib_udata *udata);
+int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
 int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
 int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index c5867e78f23196..b9e2f89337e85e 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -553,6 +553,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
 	.query_srq = bnxt_re_query_srq,
 	.reg_user_mr = bnxt_re_reg_user_mr,
 	.req_notify_cq = bnxt_re_req_notify_cq,
+	.resize_cq = bnxt_re_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
 	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
 	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
@@ -584,6 +585,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
 		return ret;

 	dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
+	ibdev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ);
 	return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
 }

@@ -919,49 +921,6 @@ static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
 	}
 }

-#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN 0x02
-static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
-				      u64 *cid_map)
-{
-	struct hwrm_queue_pri2cos_qcfg_input req = {0};
-	struct hwrm_queue_pri2cos_qcfg_output resp;
-	struct bnxt_en_dev *en_dev = rdev->en_dev;
-	struct bnxt_fw_msg fw_msg;
-	u32 flags = 0;
-	u8 *qcfgmap, *tmp_map;
-	int rc = 0, i;
-
-	if (!cid_map)
-		return -EINVAL;
-
-	memset(&fw_msg, 0, sizeof(fw_msg));
-	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
-			      HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
-	flags |= (dir & 0x01);
-	flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
-	req.flags = cpu_to_le32(flags);
-	req.port_id = en_dev->pf_port_id;
-
-	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
-	rc = bnxt_send_msg(en_dev, &fw_msg);
-	if (rc)
-		return rc;
-
-	if (resp.queue_cfg_info) {
-		ibdev_warn(&rdev->ibdev,
-			   "Asymmetric cos queue configuration detected");
-		ibdev_warn(&rdev->ibdev,
-			   " on device, QoS may not be fully functional\n");
-	}
-	qcfgmap = &resp.pri0_cos_queue_id;
-	tmp_map = (u8 *)cid_map;
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-		tmp_map[i] = qcfgmap[i];
-
-	return rc;
-}
-
 static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
 					struct bnxt_re_qp *qp)
 {
@@ -1054,26 +1013,9 @@ static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
 	return prio_map;
 }

-static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
-{
-	u16 prio;
-	u8 id;
-
-	for (prio = 0, id = 0; prio < 8; prio++) {
-		if (prio_map & (1 << prio)) {
-			cosq[id] = cid_map[prio];
-			id++;
-			if (id == 2) /* Max 2 tcs supported */
-				break;
-		}
-	}
-}
-
 static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
 {
 	u8 prio_map = 0;
-	u64 cid_map;
-	int rc;

 	/* Get priority for roce */
 	prio_map = bnxt_re_get_priority_mask(rdev);
@@ -1081,23 +1023,6 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
 	if (prio_map == rdev->cur_prio_map)
 		return 0;
 	rdev->cur_prio_map = prio_map;
-	/* Get cosq id for this priority */
-	rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
-	if (rc) {
-		ibdev_warn(&rdev->ibdev, "no cos for p_mask %x\n", prio_map);
-		return rc;
-	}
-	/* Parse CoS IDs for app priority */
-	bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);
-
-	/* Config BONO. */
-	rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
-	if (rc) {
-		ibdev_warn(&rdev->ibdev, "no tc for cos{%x, %x}\n",
-			   rdev->cosq[0], rdev->cosq[1]);
-		return rc;
-	}
-
 	/* Actual priorities are not programmed as they are already
 	 * done by L2 driver; just enable or disable priority vlan tagging
@@ -1407,6 +1332,27 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
 	return rc;
 }

+static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
+{
+	struct bnxt_qplib_cc_param cc_param = {};
+
+	/* Currently enabling only for GenP5 adapters */
+	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
+		return;
+
+	if (enable) {
+		cc_param.enable = 1;
+		cc_param.cc_mode = CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE;
+	}
+
+	cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE |
+			 CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
+			 CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);
+
+	if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
+		ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
+}
+
 /*
  * "Notifier chain callback can be invoked for the same chain from
  * different CPUs at the same time".
@@ -1475,7 +1421,7 @@ static void bnxt_re_remove(struct auxiliary_device *adev)
 		 */
 		goto skip_remove;
 	}
-
+	bnxt_re_setup_cc(rdev, false);
 	ib_unregister_device(&rdev->ibdev);
 	ib_dealloc_device(&rdev->ibdev);
 	bnxt_re_dev_uninit(rdev);
@@ -1507,6 +1453,7 @@ static int bnxt_re_probe(struct auxiliary_device *adev,
 		goto err;
 	}

+	bnxt_re_setup_cc(rdev, true);
 	mutex_unlock(&bnxt_re_mutex);

 	return 0;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 96e581ced50e24..f139d4cd17128a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -300,8 +300,6 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
 {
 	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
-	int num_srqne_processed = 0;
-	int num_cqne_processed = 0;
 	struct bnxt_qplib_cq *cq;
 	int budget = nq->budget;
 	u32 sw_cons, raw_cons;
@@ -340,9 +338,7 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
 						 DBC_DBC_TYPE_CQ_ARMENA);
 			spin_lock_bh(&cq->compl_lock);
 			atomic_set(&cq->arm_state, 0);
-			if (!nq->cqn_handler(nq, (cq)))
-				num_cqne_processed++;
-			else
+			if (nq->cqn_handler(nq, (cq)))
 				dev_warn(&nq->pdev->dev,
 					 "cqn - type 0x%x not handled\n", type);
 			cq->cnq_events++;
@@ -361,11 +357,9 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
 			srq = (struct bnxt_qplib_srq *)q_handle;
 			bnxt_qplib_armen_db(&srq->dbinfo,
 					    DBC_DBC_TYPE_SRQ_ARMENA);
-			if (!nq->srqn_handler(nq,
-					      (struct bnxt_qplib_srq *)q_handle,
-					      nqsrqe->event))
-				num_srqne_processed++;
-			else
+			if (nq->srqn_handler(nq,
+					     (struct bnxt_qplib_srq *)q_handle,
+					     nqsrqe->event))
 				dev_warn(&nq->pdev->dev,
 					 "SRQ event 0x%x not handled\n",
 					 nqsrqe->event);
@@ -581,18 +575,20 @@ void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
 			    struct bnxt_qplib_srq *srq)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct cmdq_destroy_srq req;
-	struct creq_destroy_srq_resp resp;
-	u16 cmd_flags = 0;
+	struct creq_destroy_srq_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_destroy_srq req = {};
 	int rc;

-	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
+				 sizeof(req));

 	/* Configure the request */
 	req.srq_cid = cpu_to_le32(srq->id);

-	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
-					  (struct creq_base *)&resp, NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	kfree(srq->swq);
 	if (rc)
 		return;
@@ -604,10 +600,10 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct bnxt_qplib_hwq_attr hwq_attr = {};
-	struct creq_create_srq_resp resp;
-	struct cmdq_create_srq req;
+	struct creq_create_srq_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_create_srq req = {};
 	struct bnxt_qplib_pbl *pbl;
-	u16 cmd_flags = 0;
 	u16 pg_sz_lvl;
 	int rc, idx;

@@ -627,7 +623,9 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 		goto fail;
 	}

-	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_CREATE_SRQ,
+				 sizeof(req));

 	/* Configure the request */
 	req.dpi = cpu_to_le32(srq->dpi->dpi);
@@ -644,8 +642,8 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 	req.pd_id = cpu_to_le32(srq->pd->id);
 	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		goto fail;
@@ -700,14 +698,16 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
 			 struct bnxt_qplib_srq *srq)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct cmdq_query_srq req;
-	struct creq_query_srq_resp resp;
+	struct creq_query_srq_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
 	struct bnxt_qplib_rcfw_sbuf *sbuf;
 	struct creq_query_srq_resp_sb *sb;
-	u16 cmd_flags = 0;
+	struct cmdq_query_srq req = {};
 	int rc = 0;

-	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_QUERY_SRQ,
+				 sizeof(req));

 	/* Configure the request */
 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
@@ -716,8 +716,9 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
 	req.srq_cid = cpu_to_le32(srq->id);
 	sb = sbuf->sb;
-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
-					  (void *)sbuf, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	srq->threshold = le16_to_cpu(sb->srq_limit);
 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
@@ -811,19 +812,20 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
 	struct bnxt_qplib_hwq_attr hwq_attr = {};
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+	struct creq_create_qp1_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
 	struct bnxt_qplib_q *sq = &qp->sq;
 	struct bnxt_qplib_q *rq = &qp->rq;
-	struct creq_create_qp1_resp resp;
-	struct cmdq_create_qp1 req;
+	struct cmdq_create_qp1 req = {};
 	struct bnxt_qplib_pbl *pbl;
-	u16 cmd_flags = 0;
 	u32 qp_flags = 0;
 	u8 pg_sz_lvl;
 	u32 tbl_indx;
 	int rc;

-	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
-
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_CREATE_QP1,
+				 sizeof(req));
 	/* General */
 	req.type = qp->type;
 	req.dpi = cpu_to_le32(qp->dpi->dpi);
@@ -891,8 +893,8 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	req.qp_flags = cpu_to_le32(qp_flags);
 	req.pd_id = cpu_to_le32(qp->pd->id);

-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		goto fail;
@@ -952,20 +954,22 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct bnxt_qplib_hwq_attr hwq_attr = {};
 	struct bnxt_qplib_sg_info sginfo = {};
+	struct creq_create_qp_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
 	struct bnxt_qplib_q *sq = &qp->sq;
 	struct bnxt_qplib_q *rq = &qp->rq;
-	struct creq_create_qp_resp resp;
+	struct cmdq_create_qp req = {};
 	int rc, req_size, psn_sz = 0;
 	struct bnxt_qplib_hwq *xrrq;
 	struct bnxt_qplib_pbl *pbl;
-	struct cmdq_create_qp req;
-	u16 cmd_flags = 0;
 	u32 qp_flags = 0;
 	u8 pg_sz_lvl;
 	u32 tbl_indx;
 	u16 nsge;

-	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_CREATE_QP,
+				 sizeof(req));

 	/* General */
 	req.type = qp->type;
@@ -1098,8 +1102,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	}
 	req.pd_id = cpu_to_le32(qp->pd->id);

-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		goto fail;
@@ -1231,14 +1236,16 @@ static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct cmdq_modify_qp req;
-	struct creq_modify_qp_resp resp;
-	u16 cmd_flags = 0;
+	struct creq_modify_qp_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_modify_qp req = {};
 	u32 temp32[4];
 	u32 bmask;
 	int rc;

-	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_MODIFY_QP,
+				 sizeof(req));

 	/* Filter out the qp_attr_mask based on the state->new transition */
 	__filter_modify_flags(qp);
@@ -1286,7 +1293,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 		memcpy(req.dest_mac, qp->ah.dmac, 6);

 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
-		req.path_mtu = qp->path_mtu;
+		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
 		req.timeout = qp->timeout;
@@ -1324,8 +1331,8 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)

 	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		return rc;
 	qp->cur_qp_state = qp->state;
@@ -1335,15 +1342,17 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct cmdq_query_qp req;
-	struct creq_query_qp_resp resp;
+	struct creq_query_qp_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
 	struct bnxt_qplib_rcfw_sbuf *sbuf;
 	struct creq_query_qp_resp_sb *sb;
-	u16 cmd_flags = 0;
+	struct cmdq_query_qp req = {};
 	u32 temp32[4];
 	int i, rc = 0;

-	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_QUERY_QP,
+				 sizeof(req));

 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
 	if (!sbuf)
@@ -1352,8 +1361,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	req.qp_cid = cpu_to_le32(qp->id);
 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
-					  (void *)sbuf, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		goto bail;
 	/* Extract the context from the side buffer */
@@ -1460,9 +1470,9 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
 			  struct bnxt_qplib_qp *qp)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct cmdq_destroy_qp req;
-	struct creq_destroy_qp_resp resp;
-	u16 cmd_flags = 0;
+	struct creq_destroy_qp_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_destroy_qp req = {};
 	u32 tbl_indx;
 	int rc;

@@ -1470,11 +1480,14 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
 	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
 	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

-	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_DESTROY_QP,
+				 sizeof(req));

 	req.qp_cid = cpu_to_le32(qp->id);
-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc) {
 		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
 		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
@@ -2036,10 +2049,10 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct bnxt_qplib_hwq_attr hwq_attr = {};
-	struct creq_create_cq_resp resp;
+	struct creq_create_cq_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_create_cq req = {};
 	struct bnxt_qplib_pbl *pbl;
-	struct cmdq_create_cq req;
-	u16 cmd_flags = 0;
 	u32 pg_sz_lvl;
 	int rc;

@@ -2052,7 +2065,9 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
 	if (rc)
 		goto exit;

-	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_CREATE_CQ,
+				 sizeof(req));

 	if (!cq->dpi) {
 		dev_err(&rcfw->pdev->dev,
@@ -2071,9 +2086,9 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
 	req.cq_fco_cnq_id = cpu_to_le32(
 			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
 			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
-
-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		goto fail;

@@ -2100,20 +2115,70 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
 	return rc;
 }

+void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
+				   struct bnxt_qplib_cq *cq)
+{
+	bnxt_qplib_free_hwq(res, &cq->hwq);
+	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
+}
+
+int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
+			 int new_cqes)
+{
+	struct bnxt_qplib_hwq_attr hwq_attr = {};
+	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+	struct creq_resize_cq_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_resize_cq req = {};
+	struct bnxt_qplib_pbl *pbl;
+	u32 pg_sz, lvl, new_sz;
+	int rc;
+
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_RESIZE_CQ,
+				 sizeof(req));
+	hwq_attr.sginfo = &cq->sg_info;
+	hwq_attr.res = res;
+	hwq_attr.depth = new_cqes;
+	hwq_attr.stride = sizeof(struct cq_base);
+	hwq_attr.type = HWQ_TYPE_QUEUE;
+	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
+	if (rc)
+		return rc;
+
+	req.cq_cid = cpu_to_le32(cq->id);
+	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
+	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
+	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
+	       CMDQ_RESIZE_CQ_LVL_MASK;
+	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
+		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
+	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
+	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+	return rc;
+}
+
 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct cmdq_destroy_cq req;
-	struct creq_destroy_cq_resp resp;
+	struct creq_destroy_cq_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_destroy_cq req = {};
 	u16 total_cnq_events;
-	u16 cmd_flags = 0;
 	int rc;

-	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_DESTROY_CQ,
+				 sizeof(req));

 	req.cq_cid = cpu_to_le32(cq->id);
-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		return rc;
 	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 03750195254310..d74d5ead2e32a2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -400,6 +400,7 @@ struct bnxt_qplib_cq {
 	u16 count;
 	u16 period;
 	struct bnxt_qplib_hwq hwq;
+	struct bnxt_qplib_hwq resize_hwq;
 	u32 cnq_hw_ring_id;
 	struct bnxt_qplib_nq *nq;
 	bool resize_in_progress;
@@ -532,6 +533,10 @@ void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
 			 struct bnxt_qplib_swqe *wqe);
 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
+int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
+			 int new_cqes);
+void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
+				   struct bnxt_qplib_cq *cq);
 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
 		       int num, struct bnxt_qplib_qp **qp);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 061b2895dd9b57..de906910317730 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -49,6 +49,7 @@
 #include "qplib_rcfw.h"
 #include "qplib_sp.h"
 #include "qplib_fp.h"
+#include "qplib_tlv.h"

 static void bnxt_qplib_service_creq(struct tasklet_struct *t);

@@ -85,8 +86,8 @@ static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
 	return count ? 0 : -ETIMEDOUT;
 };

-static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
-			  struct creq_base *resp, void *sb, u8 is_block)
+static int __send_message(struct bnxt_qplib_rcfw *rcfw,
+			  struct bnxt_qplib_cmdqmsg *msg)
 {
 	struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
 	struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
@@ -95,13 +96,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 	u32 sw_prod, cmdq_prod;
 	struct pci_dev *pdev;
 	unsigned long flags;
-	u32 size, opcode;
+	u32 bsize, opcode;
 	u16 cookie, cbit;
 	u8 *preq;

 	pdev = rcfw->pdev;
-	opcode = req->opcode;
+	opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);
 	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
 	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
 	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
@@ -124,7 +125,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 	 * cmdqe
 	 */
 	spin_lock_irqsave(&hwq->lock, flags);
-	if (req->cmd_size >= HWQ_FREE_SLOTS(hwq)) {
+	if (msg->req->cmd_size >= HWQ_FREE_SLOTS(hwq)) {
 		dev_err(&pdev->dev, "RCFW: CMDQ is full!\n");
 		spin_unlock_irqrestore(&hwq->lock, flags);
 		return -EAGAIN;
@@ -133,36 +134,34 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 	cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
 	cbit = cookie % rcfw->cmdq_depth;
-	if (is_block)
+	if (msg->block)
 		cookie |= RCFW_CMD_IS_BLOCKING;
 	set_bit(cbit, cmdq->cmdq_bitmap);
-	req->cookie = cpu_to_le16(cookie);
+	__set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));
 	crsqe = &rcfw->crsqe_tbl[cbit];
 	if (crsqe->resp) {
 		spin_unlock_irqrestore(&hwq->lock, flags);
 		return -EBUSY;
 	}

-	size = req->cmd_size;
 	/* change the cmd_size to the number of 16byte cmdq unit.
 	 * req->cmd_size is modified here
 	 */
-	bnxt_qplib_set_cmd_slots(req);
-
-	memset(resp, 0, sizeof(*resp));
-	crsqe->resp = (struct creq_qp_event *)resp;
-	crsqe->resp->cookie = req->cookie;
-	crsqe->req_size = req->cmd_size;
-	if (req->resp_size && sb) {
-		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;
-
-		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
-		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
-				 BNXT_QPLIB_CMDQE_UNITS;
+	bsize = bnxt_qplib_set_cmd_slots(msg->req);
+
+	memset(msg->resp, 0, sizeof(*msg->resp));
+	crsqe->resp = (struct creq_qp_event *)msg->resp;
+	crsqe->resp->cookie = cpu_to_le16(cookie);
+	crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);
+	if (__get_cmdq_base_resp_size(msg->req, msg->req_sz) && msg->sb) {
+		struct bnxt_qplib_rcfw_sbuf *sbuf = msg->sb;
+		__set_cmdq_base_resp_addr(msg->req, msg->req_sz, cpu_to_le64(sbuf->dma_addr));
+		__set_cmdq_base_resp_size(msg->req, msg->req_sz,
+					  ALIGN(sbuf->size, BNXT_QPLIB_CMDQE_UNITS));
 	}

-	preq = (u8 *)req;
+	preq = (u8 *)msg->req;
 	do {
 		/* Locate the next cmdq slot */
 		sw_prod = HWQ_CMP(hwq->prod, hwq);
@@ -174,11 +173,11 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 		}
 		/* Copy a segment of the req cmd to the cmdq */
 		memset(cmdqe, 0, sizeof(*cmdqe));
-		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
-		preq += min_t(u32, size, sizeof(*cmdqe));
-		size -= min_t(u32, size, sizeof(*cmdqe));
+		memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
+		preq += min_t(u32, bsize, sizeof(*cmdqe));
+		bsize -= min_t(u32, bsize, sizeof(*cmdqe));
 		hwq->prod++;
-	} while (size > 0);
+	} while (bsize > 0);
 	cmdq->seq_num++;

 	cmdq_prod = hwq->prod;
@@ -191,7 +190,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
 		clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
 	}
-
 	/* ring CMDQ DB */
 	wmb();
 	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
@@ -203,11 +201,9 @@
 }

 int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
-				 struct cmdq_base *req,
-				 struct creq_base *resp,
-				 void *sb, u8 is_block)
+				 struct bnxt_qplib_cmdqmsg *msg)
 {
-	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
+	struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp;
 	u16 cookie;
 	u8 opcode, retry_cnt = 0xFF;
 	int rc = 0;
@@ -217,23 +213,23 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 		return 0;

 	do {
-		opcode = req->opcode;
-		rc = __send_message(rcfw, req, resp, sb, is_block);
-		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
+		opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);
+		rc = __send_message(rcfw, msg);
+		cookie = le16_to_cpu(__get_cmdq_base_cookie(msg->req, msg->req_sz)) &
+			 RCFW_MAX_COOKIE_VALUE;
 		if (!rc)
 			break;
-
 		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
 			/* send failed */
 			dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x send failed\n",
 				cookie, opcode);
 			return rc;
 		}
-		is_block ? mdelay(1) : usleep_range(500, 1000);
+		msg->block ? mdelay(1) : usleep_range(500, 1000);

 	} while (retry_cnt--);

-	if (is_block)
+	if (msg->block)
 		rc = __block_for_resp(rcfw, cookie);
 	else
 		rc = __wait_for_resp(rcfw, cookie);
@@ -452,14 +448,17 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
 /* RCFW */
 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
 {
-	struct cmdq_deinitialize_fw req;
-	struct creq_deinitialize_fw_resp resp;
-	u16 cmd_flags = 0;
+	struct creq_deinitialize_fw_resp resp = {};
+	struct cmdq_deinitialize_fw req = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
 	int rc;

-	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
-					  NULL, 0);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_DEINITIALIZE_FW,
+				 sizeof(req));
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL,
+				sizeof(req), sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		return rc;

@@ -470,13 +469,15 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
 			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
 {
-	struct creq_initialize_fw_resp resp;
-	struct cmdq_initialize_fw req;
-	u16 cmd_flags = 0;
+	struct creq_initialize_fw_resp resp = {};
+	struct cmdq_initialize_fw req = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
 	u8 pgsz, lvl;
 	int rc;

-	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_INITIALIZE_FW,
+				 sizeof(req));
 	/* Supply (log-base-2-of-host-page-size - base-page-shift)
 	 * to bono to adjust the doorbell page sizes.
 	 */
@@ -545,8 +546,8 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
 skip_ctx_setup:
 	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
-					  NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		return rc;
 	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 0a3d8e7da3d42e..dd5651478bbb77 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -39,6 +39,8 @@
 #ifndef __BNXT_QPLIB_RCFW_H__
 #define __BNXT_QPLIB_RCFW_H__

+#include "qplib_tlv.h"
+
 #define RCFW_CMDQ_TRIG_VAL 1
 #define RCFW_COMM_PCI_BAR_REGION 0
 #define RCFW_COMM_CONS_PCI_BAR_REGION 2
@@ -51,25 +53,25 @@
 #define RCFW_DBR_PCI_BAR_REGION 2
 #define RCFW_DBR_BASE_PAGE_SHIFT 12

-#define RCFW_CMD_PREP(req, CMD, cmd_flags) \
-	do { \
-		memset(&(req), 0, sizeof((req))); \
-		(req).opcode = CMDQ_BASE_OPCODE_##CMD; \
-		(req).cmd_size = sizeof((req)); \
-		(req).flags = cpu_to_le16(cmd_flags); \
-	} while (0)
-
-#define RCFW_CMD_WAIT_TIME_MS 20000 /* 20 Seconds timeout */
-
 /* Cmdq contains a fix number of a 16-Byte slots */
 struct bnxt_qplib_cmdqe {
 	u8 data[16];
 };

+#define BNXT_QPLIB_CMDQE_UNITS sizeof(struct bnxt_qplib_cmdqe)
+
+static inline void bnxt_qplib_rcfw_cmd_prep(struct cmdq_base *req,
+					    u8 opcode, u8 cmd_size)
+{
+	req->opcode = opcode;
+	req->cmd_size = cmd_size;
+}
+
+#define RCFW_CMD_WAIT_TIME_MS 20000 /* 20 Seconds timeout */
+
 /* CMDQ elements */
 #define BNXT_QPLIB_CMDQE_MAX_CNT_256 256
 #define BNXT_QPLIB_CMDQE_MAX_CNT_8192 8192
-#define BNXT_QPLIB_CMDQE_UNITS sizeof(struct bnxt_qplib_cmdqe)
 #define BNXT_QPLIB_CMDQE_BYTES(depth) ((depth) * BNXT_QPLIB_CMDQE_UNITS)

 static inline u32 bnxt_qplib_cmdqe_npages(u32 depth)
@@ -87,11 +89,21 @@ static inline u32 bnxt_qplib_cmdqe_page_size(u32 depth)
 	return (bnxt_qplib_cmdqe_npages(depth) * PAGE_SIZE);
 }

-/* Set the cmd_size to a factor of CMDQE unit */
-static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
+static inline u32 bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
 {
-	req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
-			BNXT_QPLIB_CMDQE_UNITS;
+	u32 cmd_byte = 0;
+
+	if (HAS_TLV_HEADER(req)) {
+		struct roce_tlv *tlv_req = (struct roce_tlv *)req;
+
+		cmd_byte = tlv_req->total_size * BNXT_QPLIB_CMDQE_UNITS;
+	} else {
+		cmd_byte = req->cmd_size;
+		req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+				BNXT_QPLIB_CMDQE_UNITS;
+	}
+
+	return cmd_byte;
 }

 #define RCFW_MAX_COOKIE_VALUE 0x7FFF
@@ -190,6 +202,27 @@ struct bnxt_qplib_rcfw {
 	u32 cmdq_depth;
 };

+struct bnxt_qplib_cmdqmsg {
+	struct cmdq_base *req;
+	struct creq_base *resp;
+	void *sb;
+	u32 req_sz;
+	u32 res_sz;
+	u8 block;
+};
+
+static inline void bnxt_qplib_fill_cmdqmsg(struct bnxt_qplib_cmdqmsg *msg,
+					   void *req, void *resp, void *sb,
+					   u32 req_sz, u32 res_sz, u8 block)
+{
+	msg->req = req;
+	msg->resp = resp;
+	msg->sb = sb;
+	msg->req_sz = req_sz;
+	msg->res_sz = res_sz;
+	msg->block = block;
+}
+
 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
 int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
 				  struct bnxt_qplib_rcfw *rcfw,
@@ -210,8 +243,7 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
 void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
 			       struct bnxt_qplib_rcfw_sbuf *sbuf);
 int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
-				 struct cmdq_base *req, struct creq_base *resp,
-				 void *sbuf, u8 is_block);
+				 struct bnxt_qplib_cmdqmsg *msg);

 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index b802981b717164..1714a1e231132f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -48,6 +48,7 @@
 #include "qplib_res.h"
 #include "qplib_rcfw.h"
 #include "qplib_sp.h"
+#include "qplib_tlv.h"

 const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
 						     0, 0, 0, 0, 0, 0, 0, 0 } };
@@ -68,15 +69,17 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
 static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
 				     char *fw_ver)
 {
-	struct cmdq_query_version req;
-	struct creq_query_version_resp resp;
-	u16 cmd_flags = 0;
+	struct creq_query_version_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_query_version req = {};
 	int rc = 0;

-	RCFW_CMD_PREP(req, QUERY_VERSION, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_QUERY_VERSION,
+				 sizeof(req));

-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		return;
 	fw_ver[0] = resp.fw_maj;
@@ -88,16 +91,18 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
 int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 			    struct bnxt_qplib_dev_attr *attr, bool vf)
 {
-	struct cmdq_query_func req;
-	struct creq_query_func_resp resp;
-	struct bnxt_qplib_rcfw_sbuf *sbuf;
+	struct creq_query_func_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
 	struct creq_query_func_resp_sb *sb;
-	u16 cmd_flags = 0;
-	u32 temp;
+	struct bnxt_qplib_rcfw_sbuf *sbuf;
+	struct cmdq_query_func req = {};
 	u8 *tqm_alloc;
 	int i, rc = 0;
+	u32 temp;

-	RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_QUERY_FUNC,
+				 sizeof(req));

 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
 	if (!sbuf) {
@@ -108,8 +113,9 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	sb = sbuf->sb;
 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
-					  (void *)sbuf, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		goto bail;

@@ -174,12 +180,14 @@ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
 				  struct bnxt_qplib_rcfw *rcfw,
 				  struct bnxt_qplib_ctx *ctx)
 {
-	struct cmdq_set_func_resources req;
-	struct creq_set_func_resources_resp resp;
-	u16 cmd_flags = 0;
+	struct creq_set_func_resources_resp resp = {};
+	struct cmdq_set_func_resources req = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
 	int rc = 0;

-	RCFW_CMD_PREP(req, SET_FUNC_RESOURCES, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES,
+				 sizeof(req));

 	req.number_of_qp = cpu_to_le32(ctx->qpc_count);
 	req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
@@ -192,9 +200,9 @@ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
 	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
 	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp,
-					  NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc) {
 		dev_err(&res->pdev->dev,
 			"Failed to set function resources\n");
 	}
@@ -245,20 +253,23 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 	}
 	/* Remove GID from the SGID table */
 	if (update) {
-		struct cmdq_delete_gid req;
-		struct creq_delete_gid_resp resp;
-		u16 cmd_flags = 0;
+		struct creq_delete_gid_resp resp = {};
+		struct bnxt_qplib_cmdqmsg msg = {};
+		struct cmdq_delete_gid req = {};
 		int rc;

-		RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
+		bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+					 CMDQ_BASE_OPCODE_DELETE_GID,
+					 sizeof(req));
 		if (sgid_tbl->hw_id[index] == 0xFFFF) {
 			dev_err(&res->pdev->dev,
 				"GID entry contains an invalid HW id\n");
 			return -EINVAL;
 		}
 		req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
-		rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						  (void *)&resp, NULL, 0);
+		bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+					sizeof(resp), 0);
+		rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 		if (rc)
 			return rc;
 	}
@@ -315,12 +326,14 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 		return -ENOMEM;
 	}
 	if (update) {
-		struct cmdq_add_gid req;
-		struct creq_add_gid_resp resp;
-		u16 cmd_flags = 0;
+		struct creq_add_gid_resp resp = {};
+		struct bnxt_qplib_cmdqmsg msg = {};
+		struct cmdq_add_gid req = {};
 		int rc;

-		RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
+		bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+					 CMDQ_BASE_OPCODE_ADD_GID,
+					 sizeof(req));

 		req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
 		req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
@@ -345,8 +358,9 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 		req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
 		req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

-		rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						  (void *)&resp, NULL, 0);
+		bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+					sizeof(resp), 0);
+		rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 		if (rc)
 			return rc;
 		sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
@@ -375,12 +389,14 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 					   struct bnxt_qplib_res,
 					   sgid_tbl);
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct creq_modify_gid_resp resp;
-	struct cmdq_modify_gid req;
+	struct creq_modify_gid_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_modify_gid req = {};
 	int rc;
-	u16 cmd_flags = 0;

-	RCFW_CMD_PREP(req, MODIFY_GID, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_MODIFY_GID,
+				 sizeof(req));

 	req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
 	req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
@@ -399,8 +415,9 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,

 	req.gid_index = cpu_to_le16(gid_idx);

-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	return rc;
 }

@@ -409,14 +426,16 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
 			 bool block)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct cmdq_create_ah req;
-	struct creq_create_ah_resp resp;
-	u16 cmd_flags = 0;
+	struct creq_create_ah_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_create_ah req = {};
 	u32 temp32[4];
 	u16 temp16[3];
 	int rc;

-	RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_CREATE_AH,
+				 sizeof(req));

 	memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
 	req.dgid[0] = cpu_to_le32(temp32[0]);
@@ -439,8 +458,9 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
 	req.dest_mac[1] = cpu_to_le16(temp16[1]);
 	req.dest_mac[2] = cpu_to_le16(temp16[2]);

-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
-					  NULL, block);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), block);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		return rc;

@@ -452,26 +472,29 @@ void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
 			   bool block)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct cmdq_destroy_ah req;
-	struct creq_destroy_ah_resp resp;
-	u16 cmd_flags = 0;
+	struct creq_destroy_ah_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_destroy_ah req = {};

 	/* Clean up the AH table in the device */
-	RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_DESTROY_AH,
+				 sizeof(req));

 	req.ah_cid = cpu_to_le32(ah->id);

-	bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
-				     block);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), block);
+	bnxt_qplib_rcfw_send_message(rcfw, &msg);
 }

 /* MRW */
 int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
 {
+	struct creq_deallocate_key_resp resp = {};
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct cmdq_deallocate_key req;
-	struct creq_deallocate_key_resp resp;
-	u16 cmd_flags = 0;
+	struct cmdq_deallocate_key req = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
 	int rc;

 	if (mrw->lkey == 0xFFFFFFFF) {
@@ -479,7 +502,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
 		return 0;
 	}

-	RCFW_CMD_PREP(req, DEALLOCATE_KEY, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_DEALLOCATE_KEY,
+				 sizeof(req));

 	req.mrw_flags = mrw->type;

@@ -490,8 +515,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
 	else
 		req.key = cpu_to_le32(mrw->lkey);

-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
-					  NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		return rc;

@@ -505,13 +531,15 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
 int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct cmdq_allocate_mrw req;
-	struct creq_allocate_mrw_resp resp;
-	u16 cmd_flags = 0;
+	struct creq_allocate_mrw_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_allocate_mrw req = {};
 	unsigned long tmp;
 	int rc;

-	RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_ALLOCATE_MRW,
+				 sizeof(req));

 	req.pd_id = cpu_to_le32(mrw->pd->id);
 	req.mrw_flags = mrw->type;
@@ -523,8 +551,9 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
 	tmp = (unsigned long)mrw;
 	req.mrw_handle = cpu_to_le64(tmp);

-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, NULL, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		return rc;

@@ -541,16 +570,19 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
 			 bool block)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct cmdq_deregister_mr req;
-	struct creq_deregister_mr_resp resp;
-	u16 cmd_flags = 0;
+	struct creq_deregister_mr_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_deregister_mr req = {};
 	int rc;

-	RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_DEREGISTER_MR,
+				 sizeof(req));

 	req.lkey = cpu_to_le32(mrw->lkey);
-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, NULL, block);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), block);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		return rc;

@@ -570,11 +602,12 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct bnxt_qplib_hwq_attr hwq_attr = {};
 	struct bnxt_qplib_sg_info sginfo = {};
-	struct creq_register_mr_resp resp;
-	struct cmdq_register_mr req;
-	u16 cmd_flags = 0, level;
+	struct creq_register_mr_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_register_mr req = {};
 	int pages, rc;
 	u32 pg_size;
+	u16 level;

 	if (num_pbls) {
 		pages = roundup_pow_of_two(num_pbls);
@@ -602,7 +635,9 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
 		}
 	}

-	RCFW_CMD_PREP(req, REGISTER_MR, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_REGISTER_MR,
+				 sizeof(req));

 	/* Configure the request */
 	if (mr->hwq.level == PBL_LVL_MAX) {
@@ -627,8 +662,9 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
 	req.key = cpu_to_le32(mr->lkey);
 	req.mr_size = cpu_to_le64(mr->total_size);

-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, NULL, false);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		goto fail;

@@ -679,32 +715,19 @@ int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
 	return 0;
 }

-int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
-{
-	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct cmdq_map_tc_to_cos req;
-	struct creq_map_tc_to_cos_resp resp;
-	u16 cmd_flags = 0;
-
-	RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
-	req.cos0 = cpu_to_le16(cids[0]);
-	req.cos1 = cpu_to_le16(cids[1]);
-
-	return bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
-					    NULL, 0);
-}
-
 int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
 			      struct bnxt_qplib_roce_stats *stats)
 {
-	struct cmdq_query_roce_stats req;
-	struct creq_query_roce_stats_resp resp;
-	struct bnxt_qplib_rcfw_sbuf *sbuf;
+	struct creq_query_roce_stats_resp resp = {};
 	struct creq_query_roce_stats_resp_sb *sb;
-	u16 cmd_flags = 0;
+	struct cmdq_query_roce_stats req = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct bnxt_qplib_rcfw_sbuf *sbuf;
 	int rc = 0;

-	RCFW_CMD_PREP(req, QUERY_ROCE_STATS, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_QUERY_ROCE_STATS,
+				 sizeof(req));

 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
 	if (!sbuf) {
@@ -715,8 +738,9 @@ int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
 	sb = sbuf->sb;
 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
-					  (void *)sbuf, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		goto bail;
 	/* Extract the context from the side buffer */
@@ -780,8 +804,8 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
 	struct creq_query_roce_stats_ext_resp resp = {};
 	struct creq_query_roce_stats_ext_resp_sb *sb;
 	struct cmdq_query_roce_stats_ext req = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
 	struct bnxt_qplib_rcfw_sbuf *sbuf;
-	u16 cmd_flags = 0;
 	int rc;

 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
@@ -791,15 +815,18 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
 		return -ENOMEM;
 	}

-	RCFW_CMD_PREP(req, QUERY_ROCE_STATS_EXT, cmd_flags);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS,
+				 sizeof(req));

 	req.resp_size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
 	req.resp_addr = cpu_to_le64(sbuf->dma_addr);
 	req.function_id = cpu_to_le32(fid);
 	req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);

-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, (void *)sbuf, 0);
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		goto bail;

@@ -823,3 +850,111 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
 	return rc;
 }
+
+static void bnxt_qplib_fill_cc_gen1(struct cmdq_modify_roce_cc_gen1_tlv *ext_req,
+				    struct bnxt_qplib_cc_param_ext *cc_ext)
+{
+	ext_req->modify_mask = cpu_to_le64(cc_ext->ext_mask);
+	cc_ext->ext_mask = 0;
+	ext_req->inactivity_th_hi = cpu_to_le16(cc_ext->inact_th_hi);
+	ext_req->min_time_between_cnps = cpu_to_le16(cc_ext->min_delta_cnp);
+	ext_req->init_cp = cpu_to_le16(cc_ext->init_cp);
+	ext_req->tr_update_mode = cc_ext->tr_update_mode;
+	ext_req->tr_update_cycles = cc_ext->tr_update_cyls;
+	ext_req->fr_num_rtts = cc_ext->fr_rtt;
+	ext_req->ai_rate_increase = cc_ext->ai_rate_incr;
+	ext_req->reduction_relax_rtts_th = cpu_to_le16(cc_ext->rr_rtt_th);
+	ext_req->additional_relax_cr_th = cpu_to_le16(cc_ext->ar_cr_th);
+	ext_req->cr_min_th = cpu_to_le16(cc_ext->cr_min_th);
+	ext_req->bw_avg_weight = cc_ext->bw_avg_weight;
+	ext_req->actual_cr_factor = cc_ext->cr_factor;
+	ext_req->max_cp_cr_th = cpu_to_le16(cc_ext->cr_th_max_cp);
+	ext_req->cp_bias_en = cc_ext->cp_bias_en;
+	ext_req->cp_bias = cc_ext->cp_bias;
+	ext_req->cnp_ecn = cc_ext->cnp_ecn;
+	ext_req->rtt_jitter_en = cc_ext->rtt_jitter_en;
+	ext_req->link_bytes_per_usec = cpu_to_le16(cc_ext->bytes_per_usec);
+	ext_req->reset_cc_cr_th = cpu_to_le16(cc_ext->cc_cr_reset_th);
+	ext_req->cr_width = cc_ext->cr_width;
+	ext_req->quota_period_min = cc_ext->min_quota;
+	ext_req->quota_period_max = cc_ext->max_quota;
+	ext_req->quota_period_abs_max = cc_ext->abs_max_quota;
+	ext_req->tr_lower_bound = cpu_to_le16(cc_ext->tr_lb);
+	ext_req->cr_prob_factor = cc_ext->cr_prob_fac;
+	ext_req->tr_prob_factor = cc_ext->tr_prob_fac;
+	ext_req->fairness_cr_th = cpu_to_le16(cc_ext->fair_cr_th);
+	ext_req->red_div = cc_ext->red_div;
+	ext_req->cnp_ratio_th = cc_ext->cnp_ratio_th;
+	ext_req->exp_ai_rtts = cpu_to_le16(cc_ext->ai_ext_rtt);
+	ext_req->exp_ai_cr_cp_ratio = cc_ext->exp_crcp_ratio;
+	ext_req->use_rate_table = cc_ext->low_rate_en;
+	ext_req->cp_exp_update_th = cpu_to_le16(cc_ext->cpcr_update_th);
+	ext_req->high_exp_ai_rtts_th1 = cpu_to_le16(cc_ext->ai_rtt_th1);
+	ext_req->high_exp_ai_rtts_th2 = cpu_to_le16(cc_ext->ai_rtt_th2);
+	ext_req->actual_cr_cong_free_rtts_th = cpu_to_le16(cc_ext->cf_rtt_th);
+	ext_req->severe_cong_cr_th1 = cpu_to_le16(cc_ext->sc_cr_th1);
+	ext_req->severe_cong_cr_th2 = cpu_to_le16(cc_ext->sc_cr_th2);
+	ext_req->link64B_per_rtt = cpu_to_le32(cc_ext->l64B_per_rtt);
+	ext_req->cc_ack_bytes = cc_ext->cc_ack_bytes;
+}
+
+int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
+			 struct bnxt_qplib_cc_param *cc_param)
+{
+	struct bnxt_qplib_tlv_modify_cc_req tlv_req = {};
+	struct creq_modify_roce_cc_resp resp = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_modify_roce_cc *req;
+	int req_size;
+	void *cmd;
+	int rc;
+
+	/* Prepare the older base command */
+	req = &tlv_req.base_req;
+	cmd = req;
+	req_size = sizeof(*req);
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)req, CMDQ_BASE_OPCODE_MODIFY_ROCE_CC,
+				 sizeof(*req));
+	req->modify_mask = cpu_to_le32(cc_param->mask);
+	req->enable_cc = cc_param->enable;
+	req->g = cc_param->g;
+	req->num_phases_per_state = cc_param->nph_per_state;
+	req->time_per_phase = cc_param->time_pph;
+	req->pkts_per_phase = cc_param->pkts_pph;
+	req->init_cr = cpu_to_le16(cc_param->init_cr);
+	req->init_tr = cpu_to_le16(cc_param->init_tr);
+	req->tos_dscp_tos_ecn = (cc_param->tos_dscp << CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT) |
+				(cc_param->tos_ecn & CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK);
+	req->alt_vlan_pcp = cc_param->alt_vlan_pcp;
+	req->alt_tos_dscp = cpu_to_le16(cc_param->alt_tos_dscp);
+	req->rtt = cpu_to_le16(cc_param->rtt);
+	req->tcp_cp = cpu_to_le16(cc_param->tcp_cp);
+	req->cc_mode = cc_param->cc_mode;
+	req->inactivity_th = cpu_to_le16(cc_param->inact_th);
+
+	/* For chip gen P5 onwards fill extended cmd and header */
+	if (bnxt_qplib_is_chip_gen_p5(res->cctx)) {
+		struct roce_tlv *hdr;
+		u32 payload;
+		u32 chunks;
+
+		cmd = &tlv_req;
+		req_size = sizeof(tlv_req);
+		/* Prepare primary tlv header */
+		hdr = &tlv_req.tlv_hdr;
+		chunks = CHUNKS(sizeof(struct bnxt_qplib_tlv_modify_cc_req));
+		payload = sizeof(struct cmdq_modify_roce_cc);
+		__roce_1st_tlv_prep(hdr, chunks, payload, true);
+		/* Prepare secondary tlv header */
+		hdr = (struct roce_tlv *)&tlv_req.ext_req;
+		payload = sizeof(struct cmdq_modify_roce_cc_gen1_tlv) -
+			  sizeof(struct roce_tlv);
+		__roce_ext_tlv_prep(hdr, TLV_TYPE_MODIFY_ROCE_CC_GEN1, payload, false, true);
+		bnxt_qplib_fill_cc_gen1(&tlv_req.ext_req, &cc_param->cc_ext);
+	}
+
+	bnxt_qplib_fill_cmdqmsg(&msg, cmd, &resp, NULL, req_size,
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
+	return rc;
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 5939e8fc835357..5de874659cdfa3 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -244,6 +244,71 @@ struct bnxt_qplib_ext_stat {
 	u64 rx_ecn_marked;
 };

+struct bnxt_qplib_cc_param_ext {
+	u64 ext_mask;
+	u16 inact_th_hi;
+	u16 min_delta_cnp;
+	u16 init_cp;
+	u8 tr_update_mode;
+	u8 tr_update_cyls;
+	u8 fr_rtt;
+	u8 ai_rate_incr;
+	u16 rr_rtt_th;
+	u16 ar_cr_th;
+	u16 cr_min_th;
+	u8 bw_avg_weight;
+	u8 cr_factor;
+	u16 cr_th_max_cp;
+	u8 cp_bias_en;
+	u8 cp_bias;
+	u8 cnp_ecn;
+	u8 rtt_jitter_en;
+	u16 bytes_per_usec;
+	u16 cc_cr_reset_th;
+	u8 cr_width;
+	u8 min_quota;
+	u8 max_quota;
+	u8 abs_max_quota;
+	u16 tr_lb;
+	u8 cr_prob_fac;
+	u8 tr_prob_fac;
+	u16 fair_cr_th;
+	u8 red_div;
+	u8 cnp_ratio_th;
+	u16 ai_ext_rtt;
+	u8 exp_crcp_ratio;
+	u8 low_rate_en;
+	u16 cpcr_update_th;
+	u16 ai_rtt_th1;
+	u16 ai_rtt_th2;
+	u16 cf_rtt_th;
u16 sc_cr_th1; /* severe congestion cr threshold 1 */ + u16 sc_cr_th2; /* severe congestion cr threshold 2 */ + u32 l64B_per_rtt; + u8 cc_ack_bytes; + u16 reduce_cf_rtt_th; +}; + +struct bnxt_qplib_cc_param { + u8 alt_vlan_pcp; + u16 alt_tos_dscp; + u8 cc_mode; + u8 enable; + u16 inact_th; + u16 init_cr; + u16 init_tr; + u16 rtt; + u8 g; + u8 nph_per_state; + u8 time_pph; + u8 pkts_pph; + u8 tos_ecn; + u8 tos_dscp; + u16 tcp_cp; + struct bnxt_qplib_cc_param_ext cc_ext; + u32 mask; +}; + int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res, struct bnxt_qplib_sgid_tbl *sgid_tbl, int index, struct bnxt_qplib_gid *gid); @@ -277,10 +342,11 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res, struct bnxt_qplib_frpl *frpl, int max); int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res, struct bnxt_qplib_frpl *frpl); -int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids); int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_roce_stats *stats); int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid, struct bnxt_qplib_ext_stat *estat); +int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res, + struct bnxt_qplib_cc_param *cc_param); #endif /* __BNXT_QPLIB_SP_H__*/ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_tlv.h b/drivers/infiniband/hw/bnxt_re/qplib_tlv.h new file mode 100644 index 00000000000000..402c220734f658 --- /dev/null +++ b/drivers/infiniband/hw/bnxt_re/qplib_tlv.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ + +#ifndef __QPLIB_TLV_H__ +#define __QPLIB_TLV_H__ + +struct roce_tlv { + struct tlv tlv; + u8 total_size; // in units of 16 byte chunks + u8 unused[7]; // for 16 byte alignment +}; + +#define CHUNK_SIZE 16 +#define CHUNKS(x) (((x) + CHUNK_SIZE - 1) / CHUNK_SIZE) + +static inline void __roce_1st_tlv_prep(struct roce_tlv *rtlv, u8 tot_chunks, + u16 content_bytes, u8 flags) +{ + rtlv->tlv.cmd_discr = cpu_to_le16(CMD_DISCR_TLV_ENCAP); + rtlv->tlv.tlv_type = cpu_to_le16(TLV_TYPE_ROCE_SP_COMMAND); + rtlv->tlv.length = cpu_to_le16(content_bytes); + rtlv->tlv.flags = TLV_FLAGS_REQUIRED; + rtlv->tlv.flags |= flags ? TLV_FLAGS_MORE : 0; + rtlv->total_size = (tot_chunks); +} + +static inline void __roce_ext_tlv_prep(struct roce_tlv *rtlv, u16 tlv_type, + u16 content_bytes, u8 more, u8 flags) +{ + rtlv->tlv.cmd_discr = cpu_to_le16(CMD_DISCR_TLV_ENCAP); + rtlv->tlv.tlv_type = cpu_to_le16(tlv_type); + rtlv->tlv.length = cpu_to_le16(content_bytes); + rtlv->tlv.flags |= more ? TLV_FLAGS_MORE : 0; + rtlv->tlv.flags |= flags ? 
TLV_FLAGS_REQUIRED : 0; +} + +/* + * TLV size in units of 16 byte chunks + */ +#define TLV_SIZE ((sizeof(struct roce_tlv) + 15) / 16) +/* + * TLV length in bytes + */ +#define TLV_BYTES (TLV_SIZE * 16) + +#define HAS_TLV_HEADER(msg) (le16_to_cpu(((struct tlv *)(msg))->cmd_discr) == CMD_DISCR_TLV_ENCAP) +#define GET_TLV_DATA(tlv) ((void *)&((uint8_t *)(tlv))[TLV_BYTES]) + +static inline u8 __get_cmdq_base_opcode(struct cmdq_base *req, u32 size) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + return ((struct cmdq_base *)GET_TLV_DATA(req))->opcode; + else + return req->opcode; +} + +static inline void __set_cmdq_base_opcode(struct cmdq_base *req, + u32 size, u8 val) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + ((struct cmdq_base *)GET_TLV_DATA(req))->opcode = val; + else + req->opcode = val; +} + +static inline __le16 __get_cmdq_base_cookie(struct cmdq_base *req, u32 size) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + return ((struct cmdq_base *)GET_TLV_DATA(req))->cookie; + else + return req->cookie; +} + +static inline void __set_cmdq_base_cookie(struct cmdq_base *req, + u32 size, __le16 val) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + ((struct cmdq_base *)GET_TLV_DATA(req))->cookie = val; + else + req->cookie = val; +} + +static inline __le64 __get_cmdq_base_resp_addr(struct cmdq_base *req, u32 size) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr; + else + return req->resp_addr; +} + +static inline void __set_cmdq_base_resp_addr(struct cmdq_base *req, + u32 size, __le64 val) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + ((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr = val; + else + req->resp_addr = val; +} + +static inline u8 __get_cmdq_base_resp_size(struct cmdq_base *req, u32 size) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_size; + else + return req->resp_size; +} + +static inline void __set_cmdq_base_resp_size(struct cmdq_base *req, + u32 size, u8 val) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + ((struct cmdq_base *)GET_TLV_DATA(req))->resp_size = val; + else + req->resp_size = val; +} + +static inline u8 __get_cmdq_base_cmd_size(struct cmdq_base *req, u32 size) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + return ((struct roce_tlv *)(req))->total_size; + else + return req->cmd_size; +} + +static inline void __set_cmdq_base_cmd_size(struct cmdq_base *req, + u32 size, u8 val) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + ((struct cmdq_base *)GET_TLV_DATA(req))->cmd_size = val; + else + req->cmd_size = val; +} + +static inline __le16 __get_cmdq_base_flags(struct cmdq_base *req, u32 size) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + return ((struct cmdq_base *)GET_TLV_DATA(req))->flags; + else + return req->flags; +} + +static inline void __set_cmdq_base_flags(struct cmdq_base *req, + u32 size, __le16 val) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + ((struct cmdq_base *)GET_TLV_DATA(req))->flags = val; + else + req->flags = val; +} + +struct bnxt_qplib_tlv_modify_cc_req { + struct roce_tlv tlv_hdr; + struct cmdq_modify_roce_cc base_req; + __le64 tlvpad; + struct cmdq_modify_roce_cc_gen1_tlv ext_req; +}; + +struct bnxt_qplib_tlv_query_rcc_sb { + struct roce_tlv tlv_hdr; + struct creq_query_roce_cc_resp_sb base_sb; + struct creq_query_roce_cc_gen1_resp_sb_tlv gen1_sb; +}; +#endif /* __QPLIB_TLV_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h 
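A note on the accessor pattern qplib_tlv.h introduces above: a firmware command may now be either bare (a cmdq_base at offset 0) or TLV-encapsulated (a 16-byte roce_tlv header first, with the real command starting TLV_BYTES in). Each __get/__set_cmdq_base_*() helper therefore probes HAS_TLV_HEADER() before deciding which offset to touch. Below is a minimal user-space sketch of that dispatch — simplified stand-in types and an assumed discriminator value, not the driver's real bnxt_hsi.h layout or endian handling:

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-ins; the real structures live in bnxt_hsi.h
	 * and qplib_tlv.h, and use little-endian hardware types. */
	struct tlv {
		uint16_t cmd_discr;
		uint16_t tlv_type;
		uint16_t length;
		uint8_t flags;
		uint8_t reserved;
	};

	struct roce_tlv {
		struct tlv tlv;
		uint8_t total_size;	/* in units of 16 byte chunks */
		uint8_t unused[7];	/* for 16 byte alignment */
	};

	struct cmdq_stub {		/* stands in for struct cmdq_base */
		uint8_t opcode;
		uint8_t cmd_size;
	};

	#define CMD_DISCR_TLV_ENCAP	0x8000	/* assumed value, demo only */
	#define TLV_BYTES	(((sizeof(struct roce_tlv) + 15) / 16) * 16)

	/* Mirrors __get_cmdq_base_opcode(): look past the TLV header
	 * when one is present and the buffer extends beyond it. */
	static uint8_t get_opcode(void *req, size_t size)
	{
		struct tlv *t = req;

		if (t->cmd_discr == CMD_DISCR_TLV_ENCAP && size > TLV_BYTES)
			return ((struct cmdq_stub *)
				((uint8_t *)req + TLV_BYTES))->opcode;
		return ((struct cmdq_stub *)req)->opcode;
	}

	int main(void)
	{
		uint8_t buf[64] = { 0 };
		struct roce_tlv *hdr = (struct roce_tlv *)buf;
		struct cmdq_stub *cmd = (struct cmdq_stub *)(buf + TLV_BYTES);

		hdr->tlv.cmd_discr = CMD_DISCR_TLV_ENCAP;
		cmd->opcode = 0x8c;	/* CMDQ_BASE_OPCODE_MODIFY_ROCE_CC */

		printf("opcode = %#x\n", get_opcode(buf, sizeof(buf)));
		return 0;
	}

One asymmetry worth noticing in the real helpers: __get_cmdq_base_cmd_size() reads roce_tlv.total_size from the header itself rather than cmd_size from the encapsulated command, since for a chained message only the TLV header knows the full length in 16-byte chunks.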
index ecb719098b75f7..4a10303e039254 100644 --- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h +++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h @@ -42,3169 +42,4440 @@ /* include bnxt_hsi.h from bnxt_en driver */ #include "bnxt_hsi.h" -/* CMP Door Bell Format (4 bytes) */ -struct cmpl_doorbell { - __le32 key_mask_valid_idx; - #define CMPL_DOORBELL_IDX_MASK 0xffffffUL - #define CMPL_DOORBELL_IDX_SFT 0 - #define CMPL_DOORBELL_RESERVED_MASK 0x3000000UL - #define CMPL_DOORBELL_RESERVED_SFT 24 - #define CMPL_DOORBELL_IDX_VALID 0x4000000UL - #define CMPL_DOORBELL_MASK 0x8000000UL - #define CMPL_DOORBELL_KEY_MASK 0xf0000000UL - #define CMPL_DOORBELL_KEY_SFT 28 - #define CMPL_DOORBELL_KEY_CMPL (0x2UL << 28) -}; - -/* Status Door Bell Format (4 bytes) */ -struct status_doorbell { - __le32 key_idx; - #define STATUS_DOORBELL_IDX_MASK 0xffffffUL - #define STATUS_DOORBELL_IDX_SFT 0 - #define STATUS_DOORBELL_RESERVED_MASK 0xf000000UL - #define STATUS_DOORBELL_RESERVED_SFT 24 - #define STATUS_DOORBELL_KEY_MASK 0xf0000000UL - #define STATUS_DOORBELL_KEY_SFT 28 - #define STATUS_DOORBELL_KEY_STAT (0x3UL << 28) -}; - -/* RoCE Host Structures */ - -/* Doorbell Structures */ -/* dbc_dbc (size:64b/8B) */ -struct dbc_dbc { - __le32 index; - #define DBC_DBC_INDEX_MASK 0xffffffUL - #define DBC_DBC_INDEX_SFT 0 - __le32 type_path_xid; - #define DBC_DBC_XID_MASK 0xfffffUL - #define DBC_DBC_XID_SFT 0 - #define DBC_DBC_PATH_MASK 0x3000000UL - #define DBC_DBC_PATH_SFT 24 - #define DBC_DBC_PATH_ROCE (0x0UL << 24) - #define DBC_DBC_PATH_L2 (0x1UL << 24) - #define DBC_DBC_PATH_ENGINE (0x2UL << 24) - #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE - #define DBC_DBC_DEBUG_TRACE 0x8000000UL - #define DBC_DBC_TYPE_MASK 0xf0000000UL - #define DBC_DBC_TYPE_SFT 28 - #define DBC_DBC_TYPE_SQ (0x0UL << 28) - #define DBC_DBC_TYPE_RQ (0x1UL << 28) - #define DBC_DBC_TYPE_SRQ (0x2UL << 28) - #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28) - #define DBC_DBC_TYPE_CQ (0x4UL << 28) - #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28) - #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28) - #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28) - #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28) - #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28) - #define DBC_DBC_TYPE_NQ (0xaUL << 28) - #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28) - #define DBC_DBC_TYPE_NULL (0xfUL << 28) - #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL -}; - -/* dbc_dbc32 (size:32b/4B) */ -struct dbc_dbc32 { - __le32 type_abs_incr_xid; - #define DBC_DBC32_XID_MASK 0xfffffUL - #define DBC_DBC32_XID_SFT 0 - #define DBC_DBC32_PATH_MASK 0xc00000UL - #define DBC_DBC32_PATH_SFT 22 - #define DBC_DBC32_PATH_ROCE (0x0UL << 22) - #define DBC_DBC32_PATH_L2 (0x1UL << 22) - #define DBC_DBC32_PATH_LAST DBC_DBC32_PATH_L2 - #define DBC_DBC32_INCR_MASK 0xf000000UL - #define DBC_DBC32_INCR_SFT 24 - #define DBC_DBC32_ABS 0x10000000UL - #define DBC_DBC32_TYPE_MASK 0xe0000000UL - #define DBC_DBC32_TYPE_SFT 29 - #define DBC_DBC32_TYPE_SQ (0x0UL << 29) - #define DBC_DBC32_TYPE_LAST DBC_DBC32_TYPE_SQ -}; - -/* SQ WQE Structures */ -/* Base SQ WQE (8 bytes) */ -struct sq_base { - u8 wqe_type; - #define SQ_BASE_WQE_TYPE_SEND 0x0UL - #define SQ_BASE_WQE_TYPE_SEND_W_IMMEAD 0x1UL - #define SQ_BASE_WQE_TYPE_SEND_W_INVALID 0x2UL - #define SQ_BASE_WQE_TYPE_WRITE_WQE 0x4UL - #define SQ_BASE_WQE_TYPE_WRITE_W_IMMEAD 0x5UL - #define SQ_BASE_WQE_TYPE_READ_WQE 0x6UL - #define SQ_BASE_WQE_TYPE_ATOMIC_CS 0x8UL - #define SQ_BASE_WQE_TYPE_ATOMIC_FA 0xbUL - #define SQ_BASE_WQE_TYPE_LOCAL_INVALID 0xcUL - #define SQ_BASE_WQE_TYPE_FR_PMR 0xdUL - #define 
SQ_BASE_WQE_TYPE_BIND 0xeUL - u8 unused_0[7]; -}; - -/* WQE SGE (16 bytes) */ -struct sq_sge { - __le64 va_or_pa; - __le32 l_key; - __le32 size; +/* tx_doorbell (size:32b/4B) */ +struct tx_doorbell { + __le32 key_idx; + #define TX_DOORBELL_IDX_MASK 0xffffffUL + #define TX_DOORBELL_IDX_SFT 0 + #define TX_DOORBELL_KEY_MASK 0xf0000000UL + #define TX_DOORBELL_KEY_SFT 28 + #define TX_DOORBELL_KEY_TX (0x0UL << 28) + #define TX_DOORBELL_KEY_LAST TX_DOORBELL_KEY_TX }; -/* PSN Search Structure (8 bytes) */ -struct sq_psn_search { - __le32 opcode_start_psn; - #define SQ_PSN_SEARCH_START_PSN_MASK 0xffffffUL - #define SQ_PSN_SEARCH_START_PSN_SFT 0 - #define SQ_PSN_SEARCH_OPCODE_MASK 0xff000000UL - #define SQ_PSN_SEARCH_OPCODE_SFT 24 - __le32 flags_next_psn; - #define SQ_PSN_SEARCH_NEXT_PSN_MASK 0xffffffUL - #define SQ_PSN_SEARCH_NEXT_PSN_SFT 0 - #define SQ_PSN_SEARCH_FLAGS_MASK 0xff000000UL - #define SQ_PSN_SEARCH_FLAGS_SFT 24 +/* rx_doorbell (size:32b/4B) */ +struct rx_doorbell { + __le32 key_idx; + #define RX_DOORBELL_IDX_MASK 0xffffffUL + #define RX_DOORBELL_IDX_SFT 0 + #define RX_DOORBELL_KEY_MASK 0xf0000000UL + #define RX_DOORBELL_KEY_SFT 28 + #define RX_DOORBELL_KEY_RX (0x1UL << 28) + #define RX_DOORBELL_KEY_LAST RX_DOORBELL_KEY_RX }; -/* sq_psn_search_ext (size:128b/16B) */ -struct sq_psn_search_ext { - __le32 opcode_start_psn; - #define SQ_PSN_SEARCH_EXT_START_PSN_MASK 0xffffffUL - #define SQ_PSN_SEARCH_EXT_START_PSN_SFT 0 - #define SQ_PSN_SEARCH_EXT_OPCODE_MASK 0xff000000UL - #define SQ_PSN_SEARCH_EXT_OPCODE_SFT 24 - __le32 flags_next_psn; - #define SQ_PSN_SEARCH_EXT_NEXT_PSN_MASK 0xffffffUL - #define SQ_PSN_SEARCH_EXT_NEXT_PSN_SFT 0 - #define SQ_PSN_SEARCH_EXT_FLAGS_MASK 0xff000000UL - #define SQ_PSN_SEARCH_EXT_FLAGS_SFT 24 - __le16 start_slot_idx; - __le16 reserved16; - __le32 reserved32; -}; - -/* Send SQ WQE (40 bytes) */ -struct sq_send { - u8 wqe_type; - #define SQ_SEND_WQE_TYPE_SEND 0x0UL - #define SQ_SEND_WQE_TYPE_SEND_W_IMMEAD 0x1UL - #define SQ_SEND_WQE_TYPE_SEND_W_INVALID 0x2UL - u8 flags; - #define SQ_SEND_FLAGS_SIGNAL_COMP 0x1UL - #define SQ_SEND_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL - #define SQ_SEND_FLAGS_UC_FENCE 0x4UL - #define SQ_SEND_FLAGS_SE 0x8UL - #define SQ_SEND_FLAGS_INLINE 0x10UL - u8 wqe_size; - u8 reserved8_1; - __le32 inv_key_or_imm_data; - __le32 length; - __le32 q_key; - __le32 dst_qp; - #define SQ_SEND_DST_QP_MASK 0xffffffUL - #define SQ_SEND_DST_QP_SFT 0 - #define SQ_SEND_RESERVED8_2_MASK 0xff000000UL - #define SQ_SEND_RESERVED8_2_SFT 24 - __le32 avid; - #define SQ_SEND_AVID_MASK 0xfffffUL - #define SQ_SEND_AVID_SFT 0 - #define SQ_SEND_RESERVED_AVID_MASK 0xfff00000UL - #define SQ_SEND_RESERVED_AVID_SFT 20 - __le64 reserved64; - __le32 data[24]; +/* cmpl_doorbell (size:32b/4B) */ +struct cmpl_doorbell { + __le32 key_mask_valid_idx; + #define CMPL_DOORBELL_IDX_MASK 0xffffffUL + #define CMPL_DOORBELL_IDX_SFT 0 + #define CMPL_DOORBELL_IDX_VALID 0x4000000UL + #define CMPL_DOORBELL_MASK 0x8000000UL + #define CMPL_DOORBELL_KEY_MASK 0xf0000000UL + #define CMPL_DOORBELL_KEY_SFT 28 + #define CMPL_DOORBELL_KEY_CMPL (0x2UL << 28) + #define CMPL_DOORBELL_KEY_LAST CMPL_DOORBELL_KEY_CMPL }; -/* sq_send_hdr (size:256b/32B) */ -struct sq_send_hdr { - u8 wqe_type; - u8 flags; - u8 wqe_size; - u8 reserved8_1; - __le32 inv_key_or_imm_data; - __le32 length; - __le32 q_key; - __le32 dst_qp; - __le32 avid; - __le64 reserved64; +/* status_doorbell (size:32b/4B) */ +struct status_doorbell { + __le32 key_idx; + #define STATUS_DOORBELL_IDX_MASK 0xffffffUL + #define STATUS_DOORBELL_IDX_SFT 0 + 
#define STATUS_DOORBELL_KEY_MASK 0xf0000000UL + #define STATUS_DOORBELL_KEY_SFT 28 + #define STATUS_DOORBELL_KEY_STAT (0x3UL << 28) + #define STATUS_DOORBELL_KEY_LAST STATUS_DOORBELL_KEY_STAT }; -/* Send Raw Ethernet and QP1 SQ WQE (40 bytes) */ -struct sq_send_raweth_qp1 { - u8 wqe_type; - #define SQ_SEND_RAWETH_QP1_WQE_TYPE_SEND 0x0UL - u8 flags; - #define SQ_SEND_RAWETH_QP1_FLAGS_SIGNAL_COMP 0x1UL - #define SQ_SEND_RAWETH_QP1_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL - #define SQ_SEND_RAWETH_QP1_FLAGS_UC_FENCE 0x4UL - #define SQ_SEND_RAWETH_QP1_FLAGS_SE 0x8UL - #define SQ_SEND_RAWETH_QP1_FLAGS_INLINE 0x10UL - u8 wqe_size; - u8 reserved8; - __le16 lflags; - #define SQ_SEND_RAWETH_QP1_LFLAGS_TCP_UDP_CHKSUM 0x1UL - #define SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM 0x2UL - #define SQ_SEND_RAWETH_QP1_LFLAGS_NOCRC 0x4UL - #define SQ_SEND_RAWETH_QP1_LFLAGS_STAMP 0x8UL - #define SQ_SEND_RAWETH_QP1_LFLAGS_T_IP_CHKSUM 0x10UL - #define SQ_SEND_RAWETH_QP1_LFLAGS_RESERVED1_1 0x20UL - #define SQ_SEND_RAWETH_QP1_LFLAGS_RESERVED1_2 0x40UL - #define SQ_SEND_RAWETH_QP1_LFLAGS_RESERVED1_3 0x80UL - #define SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC 0x100UL - #define SQ_SEND_RAWETH_QP1_LFLAGS_FCOE_CRC 0x200UL - __le16 cfa_action; - __le32 length; - __le32 reserved32_1; - __le32 cfa_meta; - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK 0xfffUL - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT 0 - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_DE 0x1000UL - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_MASK 0xe000UL - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_SFT 13 - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_MASK 0x70000UL - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_SFT 16 - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16) - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16) - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16) - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16) - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16) - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16) - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_LAST \ - SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPIDCFG - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_RESERVED_MASK 0xff80000UL - #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_RESERVED_SFT 19 - #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_MASK 0xf0000000UL - #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_SFT 28 - #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_NONE (0x0UL << 28) - #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_VLAN_TAG (0x1UL << 28) - #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_LAST \ - SQ_SEND_RAWETH_QP1_CFA_META_KEY_VLAN_TAG - __le32 reserved32_2; - __le64 reserved64; - __le32 data[24]; +/* cmdq_init (size:128b/16B) */ +struct cmdq_init { + __le64 cmdq_pbl; + __le16 cmdq_size_cmdq_lvl; + #define CMDQ_INIT_CMDQ_LVL_MASK 0x3UL + #define CMDQ_INIT_CMDQ_LVL_SFT 0 + #define CMDQ_INIT_CMDQ_SIZE_MASK 0xfffcUL + #define CMDQ_INIT_CMDQ_SIZE_SFT 2 + __le16 creq_ring_id; + __le32 prod_idx; }; -/* sq_send_raweth_qp1_hdr (size:256b/32B) */ -struct sq_send_raweth_qp1_hdr { - u8 wqe_type; - u8 flags; - u8 wqe_size; +/* cmdq_base (size:128b/16B) */ +struct cmdq_base { + u8 opcode; + #define CMDQ_BASE_OPCODE_CREATE_QP 0x1UL + #define CMDQ_BASE_OPCODE_DESTROY_QP 0x2UL + #define CMDQ_BASE_OPCODE_MODIFY_QP 0x3UL + #define CMDQ_BASE_OPCODE_QUERY_QP 0x4UL + #define CMDQ_BASE_OPCODE_CREATE_SRQ 0x5UL + #define CMDQ_BASE_OPCODE_DESTROY_SRQ 0x6UL + #define CMDQ_BASE_OPCODE_QUERY_SRQ 0x8UL + #define CMDQ_BASE_OPCODE_CREATE_CQ 0x9UL + 
#define CMDQ_BASE_OPCODE_DESTROY_CQ 0xaUL + #define CMDQ_BASE_OPCODE_RESIZE_CQ 0xcUL + #define CMDQ_BASE_OPCODE_ALLOCATE_MRW 0xdUL + #define CMDQ_BASE_OPCODE_DEALLOCATE_KEY 0xeUL + #define CMDQ_BASE_OPCODE_REGISTER_MR 0xfUL + #define CMDQ_BASE_OPCODE_DEREGISTER_MR 0x10UL + #define CMDQ_BASE_OPCODE_ADD_GID 0x11UL + #define CMDQ_BASE_OPCODE_DELETE_GID 0x12UL + #define CMDQ_BASE_OPCODE_MODIFY_GID 0x17UL + #define CMDQ_BASE_OPCODE_QUERY_GID 0x18UL + #define CMDQ_BASE_OPCODE_CREATE_QP1 0x13UL + #define CMDQ_BASE_OPCODE_DESTROY_QP1 0x14UL + #define CMDQ_BASE_OPCODE_CREATE_AH 0x15UL + #define CMDQ_BASE_OPCODE_DESTROY_AH 0x16UL + #define CMDQ_BASE_OPCODE_INITIALIZE_FW 0x80UL + #define CMDQ_BASE_OPCODE_DEINITIALIZE_FW 0x81UL + #define CMDQ_BASE_OPCODE_STOP_FUNC 0x82UL + #define CMDQ_BASE_OPCODE_QUERY_FUNC 0x83UL + #define CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES 0x84UL + #define CMDQ_BASE_OPCODE_READ_CONTEXT 0x85UL + #define CMDQ_BASE_OPCODE_VF_BACKCHANNEL_REQUEST 0x86UL + #define CMDQ_BASE_OPCODE_READ_VF_MEMORY 0x87UL + #define CMDQ_BASE_OPCODE_COMPLETE_VF_REQUEST 0x88UL + #define CMDQ_BASE_OPCODE_EXTEND_CONTEXT_ARRRAY 0x89UL + #define CMDQ_BASE_OPCODE_MAP_TC_TO_COS 0x8aUL + #define CMDQ_BASE_OPCODE_QUERY_VERSION 0x8bUL + #define CMDQ_BASE_OPCODE_MODIFY_ROCE_CC 0x8cUL + #define CMDQ_BASE_OPCODE_QUERY_ROCE_CC 0x8dUL + #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS 0x8eUL + #define CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE 0x8fUL + #define CMDQ_BASE_OPCODE_MODIFY_CQ 0x90UL + #define CMDQ_BASE_OPCODE_QUERY_QP_EXTEND 0x91UL + #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL + #define CMDQ_BASE_OPCODE_LAST CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; u8 reserved8; - __le16 lflags; - __le16 cfa_action; - __le32 length; - __le32 reserved32_1; - __le32 cfa_meta; - __le32 reserved32_2; - __le64 reserved64; + __le64 resp_addr; }; -/* RDMA SQ WQE (40 bytes) */ -struct sq_rdma { - u8 wqe_type; - #define SQ_RDMA_WQE_TYPE_WRITE_WQE 0x4UL - #define SQ_RDMA_WQE_TYPE_WRITE_W_IMMEAD 0x5UL - #define SQ_RDMA_WQE_TYPE_READ_WQE 0x6UL - u8 flags; - #define SQ_RDMA_FLAGS_SIGNAL_COMP 0x1UL - #define SQ_RDMA_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL - #define SQ_RDMA_FLAGS_UC_FENCE 0x4UL - #define SQ_RDMA_FLAGS_SE 0x8UL - #define SQ_RDMA_FLAGS_INLINE 0x10UL - u8 wqe_size; - u8 reserved8; - __le32 imm_data; - __le32 length; - __le32 reserved32_1; - __le64 remote_va; - __le32 remote_key; - __le32 reserved32_2; - __le32 data[24]; +/* creq_base (size:128b/16B) */ +struct creq_base { + u8 type; + #define CREQ_BASE_TYPE_MASK 0x3fUL + #define CREQ_BASE_TYPE_SFT 0 + #define CREQ_BASE_TYPE_QP_EVENT 0x38UL + #define CREQ_BASE_TYPE_FUNC_EVENT 0x3aUL + #define CREQ_BASE_TYPE_LAST CREQ_BASE_TYPE_FUNC_EVENT + u8 reserved56[7]; + u8 v; + #define CREQ_BASE_V 0x1UL + u8 event; + u8 reserved48[6]; }; -/* sq_rdma_hdr (size:256b/32B) */ -struct sq_rdma_hdr { - u8 wqe_type; - u8 flags; - u8 wqe_size; +/* cmdq_query_version (size:128b/16B) */ +struct cmdq_query_version { + u8 opcode; + #define CMDQ_QUERY_VERSION_OPCODE_QUERY_VERSION 0x8bUL + #define CMDQ_QUERY_VERSION_OPCODE_LAST CMDQ_QUERY_VERSION_OPCODE_QUERY_VERSION + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; u8 reserved8; - __le32 imm_data; - __le32 length; - __le32 reserved32_1; - __le64 remote_va; - __le32 remote_key; - __le32 reserved32_2; -}; - -/* Atomic SQ WQE (40 bytes) */ -struct sq_atomic { - u8 wqe_type; - #define SQ_ATOMIC_WQE_TYPE_ATOMIC_CS 0x8UL - #define SQ_ATOMIC_WQE_TYPE_ATOMIC_FA 0xbUL - u8 flags; - #define 
SQ_ATOMIC_FLAGS_SIGNAL_COMP 0x1UL - #define SQ_ATOMIC_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL - #define SQ_ATOMIC_FLAGS_UC_FENCE 0x4UL - #define SQ_ATOMIC_FLAGS_SE 0x8UL - #define SQ_ATOMIC_FLAGS_INLINE 0x10UL - __le16 reserved16; - __le32 remote_key; - __le64 remote_va; - __le64 swap_data; - __le64 cmp_data; - __le32 data[24]; + __le64 resp_addr; }; -/* sq_atomic_hdr (size:256b/32B) */ -struct sq_atomic_hdr { - u8 wqe_type; - u8 flags; +/* creq_query_version_resp (size:128b/16B) */ +struct creq_query_version_resp { + u8 type; + #define CREQ_QUERY_VERSION_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_VERSION_RESP_TYPE_SFT 0 + #define CREQ_QUERY_VERSION_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_VERSION_RESP_TYPE_LAST CREQ_QUERY_VERSION_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + u8 fw_maj; + u8 fw_minor; + u8 fw_bld; + u8 fw_rsvd; + u8 v; + #define CREQ_QUERY_VERSION_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_VERSION_RESP_EVENT_QUERY_VERSION 0x8bUL + #define CREQ_QUERY_VERSION_RESP_EVENT_LAST \ + CREQ_QUERY_VERSION_RESP_EVENT_QUERY_VERSION __le16 reserved16; - __le32 remote_key; - __le64 remote_va; - __le64 swap_data; - __le64 cmp_data; + u8 intf_maj; + u8 intf_minor; + u8 intf_bld; + u8 intf_rsvd; }; -/* Local Invalidate SQ WQE (40 bytes) */ -struct sq_localinvalidate { - u8 wqe_type; - #define SQ_LOCALINVALIDATE_WQE_TYPE_LOCAL_INVALID 0xcUL - u8 flags; - #define SQ_LOCALINVALIDATE_FLAGS_SIGNAL_COMP 0x1UL - #define SQ_LOCALINVALIDATE_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL - #define SQ_LOCALINVALIDATE_FLAGS_UC_FENCE 0x4UL - #define SQ_LOCALINVALIDATE_FLAGS_SE 0x8UL - #define SQ_LOCALINVALIDATE_FLAGS_INLINE 0x10UL - __le16 reserved16; - __le32 inv_l_key; - __le64 reserved64; - __le32 reserved128[4]; - __le32 data[24]; -}; - -/* sq_localinvalidate_hdr (size:256b/32B) */ -struct sq_localinvalidate_hdr { - u8 wqe_type; - u8 flags; - __le16 reserved16; - __le32 inv_l_key; - __le64 reserved64; - u8 reserved128[16]; +/* cmdq_initialize_fw (size:896b/112B) */ +struct cmdq_initialize_fw { + u8 opcode; + #define CMDQ_INITIALIZE_FW_OPCODE_INITIALIZE_FW 0x80UL + #define CMDQ_INITIALIZE_FW_OPCODE_LAST CMDQ_INITIALIZE_FW_OPCODE_INITIALIZE_FW + u8 cmd_size; + __le16 flags; + #define CMDQ_INITIALIZE_FW_FLAGS_MRAV_RESERVATION_SPLIT 0x1UL + #define CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED 0x2UL + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + u8 qpc_pg_size_qpc_lvl; + #define CMDQ_INITIALIZE_FW_QPC_LVL_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_QPC_LVL_SFT 0 + #define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_0 0x0UL + #define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_1 0x1UL + #define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_2 0x2UL + #define CMDQ_INITIALIZE_FW_QPC_LVL_LAST CMDQ_INITIALIZE_FW_QPC_LVL_LVL_2 + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_MASK 0xf0UL + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT 4 + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_LAST CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G + u8 mrw_pg_size_mrw_lvl; + #define CMDQ_INITIALIZE_FW_MRW_LVL_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_MRW_LVL_SFT 0 + #define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_0 0x0UL + #define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_1 0x1UL + #define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_2 0x2UL + #define 
CMDQ_INITIALIZE_FW_MRW_LVL_LAST CMDQ_INITIALIZE_FW_MRW_LVL_LVL_2 + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_MASK 0xf0UL + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_SFT 4 + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_LAST CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_1G + u8 srq_pg_size_srq_lvl; + #define CMDQ_INITIALIZE_FW_SRQ_LVL_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_SRQ_LVL_SFT 0 + #define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_0 0x0UL + #define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_1 0x1UL + #define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_2 0x2UL + #define CMDQ_INITIALIZE_FW_SRQ_LVL_LAST CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_2 + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_MASK 0xf0UL + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_SFT 4 + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_LAST CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_1G + u8 cq_pg_size_cq_lvl; + #define CMDQ_INITIALIZE_FW_CQ_LVL_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_CQ_LVL_SFT 0 + #define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_0 0x0UL + #define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_1 0x1UL + #define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_2 0x2UL + #define CMDQ_INITIALIZE_FW_CQ_LVL_LAST CMDQ_INITIALIZE_FW_CQ_LVL_LVL_2 + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_MASK 0xf0UL + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_SFT 4 + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_LAST CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_1G + u8 tqm_pg_size_tqm_lvl; + #define CMDQ_INITIALIZE_FW_TQM_LVL_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_TQM_LVL_SFT 0 + #define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_0 0x0UL + #define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_1 0x1UL + #define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_2 0x2UL + #define CMDQ_INITIALIZE_FW_TQM_LVL_LAST CMDQ_INITIALIZE_FW_TQM_LVL_LVL_2 + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_MASK 0xf0UL + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_SFT 4 + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_LAST CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_1G + u8 tim_pg_size_tim_lvl; + #define CMDQ_INITIALIZE_FW_TIM_LVL_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_TIM_LVL_SFT 0 + #define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_0 0x0UL + #define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_1 0x1UL + #define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_2 0x2UL + #define CMDQ_INITIALIZE_FW_TIM_LVL_LAST 
CMDQ_INITIALIZE_FW_TIM_LVL_LVL_2 + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_MASK 0xf0UL + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_SFT 4 + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_LAST CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G + __le16 log2_dbr_pg_size; + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0 + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \ + CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M + #define CMDQ_INITIALIZE_FW_RSVD_MASK 0xfff0UL + #define CMDQ_INITIALIZE_FW_RSVD_SFT 4 + __le64 qpc_page_dir; + __le64 mrw_page_dir; + __le64 srq_page_dir; + __le64 cq_page_dir; + __le64 tqm_page_dir; + __le64 tim_page_dir; + __le32 number_of_qp; + __le32 number_of_mrw; + __le32 number_of_srq; + __le32 number_of_cq; + __le32 max_qp_per_vf; + __le32 max_mrw_per_vf; + __le32 max_srq_per_vf; + __le32 max_cq_per_vf; + __le32 max_gid_per_vf; + __le32 stat_ctx_id; }; -/* FR-PMR SQ WQE (40 bytes) */ -struct sq_fr_pmr { - u8 wqe_type; - #define SQ_FR_PMR_WQE_TYPE_FR_PMR 0xdUL - u8 flags; - #define SQ_FR_PMR_FLAGS_SIGNAL_COMP 0x1UL - #define SQ_FR_PMR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL - #define SQ_FR_PMR_FLAGS_UC_FENCE 0x4UL - #define SQ_FR_PMR_FLAGS_SE 0x8UL - #define SQ_FR_PMR_FLAGS_INLINE 0x10UL - u8 access_cntl; - #define SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE 0x1UL - #define SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ 0x2UL - #define SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE 0x4UL - #define SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL - #define SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND 0x10UL - u8 zero_based_page_size_log; - #define SQ_FR_PMR_PAGE_SIZE_LOG_MASK 0x1fUL - #define SQ_FR_PMR_PAGE_SIZE_LOG_SFT 0 - #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4K 0x0UL - #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8K 0x1UL - #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_64K 0x4UL - #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_256K 0x6UL - #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1M 0x8UL - #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_2M 0x9UL - #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4M 0xaUL - #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1G 0x12UL - #define SQ_FR_PMR_ZERO_BASED 0x20UL - #define SQ_FR_PMR_RESERVED2_MASK 0xc0UL - #define SQ_FR_PMR_RESERVED2_SFT 6 - __le32 l_key; - u8 length[5]; - u8 reserved8_1; - u8 reserved8_2; - u8 numlevels_pbl_page_size_log; - 
#define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK 0x1fUL - #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT 0 - #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4K 0x0UL - #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8K 0x1UL - #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_64K 0x4UL - #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_256K 0x6UL - #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1M 0x8UL - #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_2M 0x9UL - #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4M 0xaUL - #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1G 0x12UL - #define SQ_FR_PMR_RESERVED1 0x20UL - #define SQ_FR_PMR_NUMLEVELS_MASK 0xc0UL - #define SQ_FR_PMR_NUMLEVELS_SFT 6 - #define SQ_FR_PMR_NUMLEVELS_PHYSICAL (0x0UL << 6) - #define SQ_FR_PMR_NUMLEVELS_LAYER1 (0x1UL << 6) - #define SQ_FR_PMR_NUMLEVELS_LAYER2 (0x2UL << 6) - __le64 pblptr; - __le64 va; - __le32 data[24]; +/* creq_initialize_fw_resp (size:128b/16B) */ +struct creq_initialize_fw_resp { + u8 type; + #define CREQ_INITIALIZE_FW_RESP_TYPE_MASK 0x3fUL + #define CREQ_INITIALIZE_FW_RESP_TYPE_SFT 0 + #define CREQ_INITIALIZE_FW_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_INITIALIZE_FW_RESP_TYPE_LAST CREQ_INITIALIZE_FW_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_INITIALIZE_FW_RESP_V 0x1UL + u8 event; + #define CREQ_INITIALIZE_FW_RESP_EVENT_INITIALIZE_FW 0x80UL + #define CREQ_INITIALIZE_FW_RESP_EVENT_LAST \ + CREQ_INITIALIZE_FW_RESP_EVENT_INITIALIZE_FW + u8 reserved48[6]; }; -/* sq_fr_pmr_hdr (size:256b/32B) */ -struct sq_fr_pmr_hdr { - u8 wqe_type; - u8 flags; - u8 access_cntl; - u8 zero_based_page_size_log; - __le32 l_key; - u8 length[5]; - u8 reserved8_1; - u8 reserved8_2; - u8 numlevels_pbl_page_size_log; - __le64 pblptr; - __le64 va; +/* cmdq_deinitialize_fw (size:128b/16B) */ +struct cmdq_deinitialize_fw { + u8 opcode; + #define CMDQ_DEINITIALIZE_FW_OPCODE_DEINITIALIZE_FW 0x81UL + #define CMDQ_DEINITIALIZE_FW_OPCODE_LAST \ + CMDQ_DEINITIALIZE_FW_OPCODE_DEINITIALIZE_FW + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; }; -/* Bind SQ WQE (40 bytes) */ -struct sq_bind { - u8 wqe_type; - #define SQ_BIND_WQE_TYPE_BIND 0xeUL - u8 flags; - #define SQ_BIND_FLAGS_SIGNAL_COMP 0x1UL - #define SQ_BIND_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL - #define SQ_BIND_FLAGS_UC_FENCE 0x4UL - #define SQ_BIND_FLAGS_SE 0x8UL - #define SQ_BIND_FLAGS_INLINE 0x10UL - u8 access_cntl; - #define SQ_BIND_ACCESS_CNTL_LOCAL_WRITE 0x1UL - #define SQ_BIND_ACCESS_CNTL_REMOTE_READ 0x2UL - #define SQ_BIND_ACCESS_CNTL_REMOTE_WRITE 0x4UL - #define SQ_BIND_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL - #define SQ_BIND_ACCESS_CNTL_WINDOW_BIND 0x10UL - u8 reserved8_1; - u8 mw_type_zero_based; - #define SQ_BIND_ZERO_BASED 0x1UL - #define SQ_BIND_MW_TYPE 0x2UL - #define SQ_BIND_MW_TYPE_TYPE1 (0x0UL << 1) - #define SQ_BIND_MW_TYPE_TYPE2 (0x1UL << 1) - #define SQ_BIND_RESERVED6_MASK 0xfcUL - #define SQ_BIND_RESERVED6_SFT 2 - u8 reserved8_2; - __le16 reserved16; - __le32 parent_l_key; - __le32 l_key; - __le64 va; - u8 length[5]; - u8 data_reserved24[99]; - #define SQ_BIND_RESERVED24_MASK 0xffffff00UL - #define SQ_BIND_RESERVED24_SFT 8 - #define SQ_BIND_DATA_MASK 0xffffffffUL - #define SQ_BIND_DATA_SFT 0 +/* creq_deinitialize_fw_resp (size:128b/16B) */ +struct creq_deinitialize_fw_resp { + u8 type; + #define CREQ_DEINITIALIZE_FW_RESP_TYPE_MASK 0x3fUL + #define CREQ_DEINITIALIZE_FW_RESP_TYPE_SFT 0 + #define CREQ_DEINITIALIZE_FW_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DEINITIALIZE_FW_RESP_TYPE_LAST CREQ_DEINITIALIZE_FW_RESP_TYPE_QP_EVENT + u8 status; + __le16 
cookie; + __le32 reserved32; + u8 v; + #define CREQ_DEINITIALIZE_FW_RESP_V 0x1UL + u8 event; + #define CREQ_DEINITIALIZE_FW_RESP_EVENT_DEINITIALIZE_FW 0x81UL + #define CREQ_DEINITIALIZE_FW_RESP_EVENT_LAST \ + CREQ_DEINITIALIZE_FW_RESP_EVENT_DEINITIALIZE_FW + u8 reserved48[6]; }; -/* sq_bind_hdr (size:256b/32B) */ -struct sq_bind_hdr { - u8 wqe_type; - u8 flags; - u8 access_cntl; - u8 reserved8_1; - u8 mw_type_zero_based; - u8 reserved8_2; - __le16 reserved16; - __le32 parent_l_key; - __le32 l_key; - __le64 va; - u8 length[5]; - u8 reserved24[3]; +/* cmdq_create_qp (size:768b/96B) */ +struct cmdq_create_qp { + u8 opcode; + #define CMDQ_CREATE_QP_OPCODE_CREATE_QP 0x1UL + #define CMDQ_CREATE_QP_OPCODE_LAST CMDQ_CREATE_QP_OPCODE_CREATE_QP + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 qp_handle; + __le32 qp_flags; + #define CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED 0x1UL + #define CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION 0x2UL + #define CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE 0x4UL + #define CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED 0x8UL + #define CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED 0x10UL + #define CMDQ_CREATE_QP_QP_FLAGS_OPTIMIZED_TRANSMIT_ENABLED 0x20UL + #define CMDQ_CREATE_QP_QP_FLAGS_RESPONDER_UD_CQE_WITH_CFA 0x40UL + #define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED 0x80UL + #define CMDQ_CREATE_QP_QP_FLAGS_LAST \ + CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED + u8 type; + #define CMDQ_CREATE_QP_TYPE_RC 0x2UL + #define CMDQ_CREATE_QP_TYPE_UD 0x4UL + #define CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE 0x6UL + #define CMDQ_CREATE_QP_TYPE_GSI 0x7UL + #define CMDQ_CREATE_QP_TYPE_LAST CMDQ_CREATE_QP_TYPE_GSI + u8 sq_pg_size_sq_lvl; + #define CMDQ_CREATE_QP_SQ_LVL_MASK 0xfUL + #define CMDQ_CREATE_QP_SQ_LVL_SFT 0 + #define CMDQ_CREATE_QP_SQ_LVL_LVL_0 0x0UL + #define CMDQ_CREATE_QP_SQ_LVL_LVL_1 0x1UL + #define CMDQ_CREATE_QP_SQ_LVL_LVL_2 0x2UL + #define CMDQ_CREATE_QP_SQ_LVL_LAST CMDQ_CREATE_QP_SQ_LVL_LVL_2 + #define CMDQ_CREATE_QP_SQ_PG_SIZE_MASK 0xf0UL + #define CMDQ_CREATE_QP_SQ_PG_SIZE_SFT 4 + #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_CREATE_QP_SQ_PG_SIZE_LAST CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G + u8 rq_pg_size_rq_lvl; + #define CMDQ_CREATE_QP_RQ_LVL_MASK 0xfUL + #define CMDQ_CREATE_QP_RQ_LVL_SFT 0 + #define CMDQ_CREATE_QP_RQ_LVL_LVL_0 0x0UL + #define CMDQ_CREATE_QP_RQ_LVL_LVL_1 0x1UL + #define CMDQ_CREATE_QP_RQ_LVL_LVL_2 0x2UL + #define CMDQ_CREATE_QP_RQ_LVL_LAST CMDQ_CREATE_QP_RQ_LVL_LVL_2 + #define CMDQ_CREATE_QP_RQ_PG_SIZE_MASK 0xf0UL + #define CMDQ_CREATE_QP_RQ_PG_SIZE_SFT 4 + #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_CREATE_QP_RQ_PG_SIZE_LAST CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G + u8 unused_0; + __le32 dpi; + __le32 sq_size; + __le32 rq_size; + __le16 sq_fwo_sq_sge; + #define CMDQ_CREATE_QP_SQ_SGE_MASK 0xfUL + #define CMDQ_CREATE_QP_SQ_SGE_SFT 0 + #define CMDQ_CREATE_QP_SQ_FWO_MASK 0xfff0UL + #define CMDQ_CREATE_QP_SQ_FWO_SFT 4 + __le16 
rq_fwo_rq_sge; + #define CMDQ_CREATE_QP_RQ_SGE_MASK 0xfUL + #define CMDQ_CREATE_QP_RQ_SGE_SFT 0 + #define CMDQ_CREATE_QP_RQ_FWO_MASK 0xfff0UL + #define CMDQ_CREATE_QP_RQ_FWO_SFT 4 + __le32 scq_cid; + __le32 rcq_cid; + __le32 srq_cid; + __le32 pd_id; + __le64 sq_pbl; + __le64 rq_pbl; + __le64 irrq_addr; + __le64 orrq_addr; }; -/* RQ/SRQ WQE Structures */ -/* RQ/SRQ WQE (40 bytes) */ -struct rq_wqe { - u8 wqe_type; - #define RQ_WQE_WQE_TYPE_RCV 0x80UL - u8 flags; - u8 wqe_size; - u8 reserved8; - __le32 reserved32; - __le32 wr_id[2]; - #define RQ_WQE_WR_ID_MASK 0xfffffUL - #define RQ_WQE_WR_ID_SFT 0 - #define RQ_WQE_RESERVED44_MASK 0xfff00000UL - #define RQ_WQE_RESERVED44_SFT 20 - __le32 reserved128[4]; - __le32 data[24]; +/* creq_create_qp_resp (size:128b/16B) */ +struct creq_create_qp_resp { + u8 type; + #define CREQ_CREATE_QP_RESP_TYPE_MASK 0x3fUL + #define CREQ_CREATE_QP_RESP_TYPE_SFT 0 + #define CREQ_CREATE_QP_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_CREATE_QP_RESP_TYPE_LAST CREQ_CREATE_QP_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_CREATE_QP_RESP_V 0x1UL + u8 event; + #define CREQ_CREATE_QP_RESP_EVENT_CREATE_QP 0x1UL + #define CREQ_CREATE_QP_RESP_EVENT_LAST CREQ_CREATE_QP_RESP_EVENT_CREATE_QP + u8 optimized_transmit_enabled; + u8 reserved48[5]; }; -/* rq_wqe_hdr (size:256b/32B) */ -struct rq_wqe_hdr { - u8 wqe_type; - u8 flags; - u8 wqe_size; +/* cmdq_destroy_qp (size:192b/24B) */ +struct cmdq_destroy_qp { + u8 opcode; + #define CMDQ_DESTROY_QP_OPCODE_DESTROY_QP 0x2UL + #define CMDQ_DESTROY_QP_OPCODE_LAST CMDQ_DESTROY_QP_OPCODE_DESTROY_QP + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; u8 reserved8; - __le32 reserved32; - __le32 wr_id[2]; - u8 reserved128[16]; + __le64 resp_addr; + __le32 qp_cid; + __le32 unused_0; }; -/* CQ CQE Structures */ -/* Base CQE (32 bytes) */ -struct cq_base { - __le64 reserved64_1; - __le64 reserved64_2; - __le64 reserved64_3; - u8 cqe_type_toggle; - #define CQ_BASE_TOGGLE 0x1UL - #define CQ_BASE_CQE_TYPE_MASK 0x1eUL - #define CQ_BASE_CQE_TYPE_SFT 1 - #define CQ_BASE_CQE_TYPE_REQ (0x0UL << 1) - #define CQ_BASE_CQE_TYPE_RES_RC (0x1UL << 1) - #define CQ_BASE_CQE_TYPE_RES_UD (0x2UL << 1) - #define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1) - #define CQ_BASE_CQE_TYPE_TERMINAL (0xeUL << 1) - #define CQ_BASE_CQE_TYPE_CUT_OFF (0xfUL << 1) - #define CQ_BASE_RESERVED3_MASK 0xe0UL - #define CQ_BASE_RESERVED3_SFT 5 - u8 status; - __le16 reserved16; - __le32 reserved32; -}; - -/* Requester CQ CQE (32 bytes) */ -struct cq_req { - __le64 qp_handle; - __le16 sq_cons_idx; - __le16 reserved16_1; - __le32 reserved32_2; - __le64 reserved64; - u8 cqe_type_toggle; - #define CQ_REQ_TOGGLE 0x1UL - #define CQ_REQ_CQE_TYPE_MASK 0x1eUL - #define CQ_REQ_CQE_TYPE_SFT 1 - #define CQ_REQ_CQE_TYPE_REQ (0x0UL << 1) - #define CQ_REQ_RESERVED3_MASK 0xe0UL - #define CQ_REQ_RESERVED3_SFT 5 - u8 status; - #define CQ_REQ_STATUS_OK 0x0UL - #define CQ_REQ_STATUS_BAD_RESPONSE_ERR 0x1UL - #define CQ_REQ_STATUS_LOCAL_LENGTH_ERR 0x2UL - #define CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR 0x3UL - #define CQ_REQ_STATUS_LOCAL_PROTECTION_ERR 0x4UL - #define CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL - #define CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR 0x6UL - #define CQ_REQ_STATUS_REMOTE_ACCESS_ERR 0x7UL - #define CQ_REQ_STATUS_REMOTE_OPERATION_ERR 0x8UL - #define CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR 0x9UL - #define CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR 0xaUL - #define CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR 0xbUL - __le16 reserved16_2; - __le32 
reserved32_1; -}; - -/* Responder RC CQE (32 bytes) */ -struct cq_res_rc { - __le32 length; - __le32 imm_data_or_inv_r_key; - __le64 qp_handle; - __le64 mr_handle; - u8 cqe_type_toggle; - #define CQ_RES_RC_TOGGLE 0x1UL - #define CQ_RES_RC_CQE_TYPE_MASK 0x1eUL - #define CQ_RES_RC_CQE_TYPE_SFT 1 - #define CQ_RES_RC_CQE_TYPE_RES_RC (0x1UL << 1) - #define CQ_RES_RC_RESERVED3_MASK 0xe0UL - #define CQ_RES_RC_RESERVED3_SFT 5 - u8 status; - #define CQ_RES_RC_STATUS_OK 0x0UL - #define CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR 0x1UL - #define CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR 0x2UL - #define CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR 0x3UL - #define CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL - #define CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL - #define CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR 0x6UL - #define CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL - #define CQ_RES_RC_STATUS_HW_FLUSH_ERR 0x8UL - __le16 flags; - #define CQ_RES_RC_FLAGS_SRQ 0x1UL - #define CQ_RES_RC_FLAGS_SRQ_RQ (0x0UL << 0) - #define CQ_RES_RC_FLAGS_SRQ_SRQ (0x1UL << 0) - #define CQ_RES_RC_FLAGS_SRQ_LAST CQ_RES_RC_FLAGS_SRQ_SRQ - #define CQ_RES_RC_FLAGS_IMM 0x2UL - #define CQ_RES_RC_FLAGS_INV 0x4UL - #define CQ_RES_RC_FLAGS_RDMA 0x8UL - #define CQ_RES_RC_FLAGS_RDMA_SEND (0x0UL << 3) - #define CQ_RES_RC_FLAGS_RDMA_RDMA_WRITE (0x1UL << 3) - #define CQ_RES_RC_FLAGS_RDMA_LAST CQ_RES_RC_FLAGS_RDMA_RDMA_WRITE - __le32 srq_or_rq_wr_id; - #define CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL - #define CQ_RES_RC_SRQ_OR_RQ_WR_ID_SFT 0 - #define CQ_RES_RC_RESERVED12_MASK 0xfff00000UL - #define CQ_RES_RC_RESERVED12_SFT 20 -}; - -/* Responder UD CQE (32 bytes) */ -struct cq_res_ud { - __le16 length; - #define CQ_RES_UD_LENGTH_MASK 0x3fffUL - #define CQ_RES_UD_LENGTH_SFT 0 - __le16 cfa_metadata; - #define CQ_RES_UD_CFA_METADATA_VID_MASK 0xfffUL - #define CQ_RES_UD_CFA_METADATA_VID_SFT 0 - #define CQ_RES_UD_CFA_METADATA_DE 0x1000UL - #define CQ_RES_UD_CFA_METADATA_PRI_MASK 0xe000UL - #define CQ_RES_UD_CFA_METADATA_PRI_SFT 13 - __le32 imm_data; - __le64 qp_handle; - __le16 src_mac[3]; - __le16 src_qp_low; - u8 cqe_type_toggle; - #define CQ_RES_UD_TOGGLE 0x1UL - #define CQ_RES_UD_CQE_TYPE_MASK 0x1eUL - #define CQ_RES_UD_CQE_TYPE_SFT 1 - #define CQ_RES_UD_CQE_TYPE_RES_UD (0x2UL << 1) - u8 status; - #define CQ_RES_UD_STATUS_OK 0x0UL - #define CQ_RES_UD_STATUS_LOCAL_ACCESS_ERROR 0x1UL - #define CQ_RES_UD_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL - #define CQ_RES_UD_STATUS_LOCAL_PROTECTION_ERR 0x3UL - #define CQ_RES_UD_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL - #define CQ_RES_UD_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL - #define CQ_RES_UD_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL - #define CQ_RES_UD_STATUS_HW_FLUSH_ERR 0x8UL - __le16 flags; - #define CQ_RES_UD_FLAGS_SRQ 0x1UL - #define CQ_RES_UD_FLAGS_SRQ_RQ (0x0UL << 0) - #define CQ_RES_UD_FLAGS_SRQ_SRQ (0x1UL << 0) - #define CQ_RES_UD_FLAGS_SRQ_LAST CQ_RES_UD_FLAGS_SRQ_SRQ - #define CQ_RES_UD_FLAGS_IMM 0x2UL - #define CQ_RES_UD_FLAGS_UNUSED_MASK 0xcUL - #define CQ_RES_UD_FLAGS_UNUSED_SFT 2 - #define CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK 0x30UL - #define CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT 4 - #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4) - #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4) - #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4) - #define CQ_RES_UD_FLAGS_ROCE_IP_VER_LAST \ - CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6 - #define CQ_RES_UD_FLAGS_META_FORMAT_MASK 0x3c0UL - #define CQ_RES_UD_FLAGS_META_FORMAT_SFT 6 - #define CQ_RES_UD_FLAGS_META_FORMAT_NONE (0x0UL << 6) - #define CQ_RES_UD_FLAGS_META_FORMAT_VLAN 
(0x1UL << 6) - #define CQ_RES_UD_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6) - #define CQ_RES_UD_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6) - #define CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6) - #define CQ_RES_UD_FLAGS_META_FORMAT_LAST \ - CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET - #define CQ_RES_UD_FLAGS_EXT_META_FORMAT_MASK 0xc00UL - #define CQ_RES_UD_FLAGS_EXT_META_FORMAT_SFT 10 - - __le32 src_qp_high_srq_or_rq_wr_id; - #define CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL - #define CQ_RES_UD_SRQ_OR_RQ_WR_ID_SFT 0 - #define CQ_RES_UD_SRC_QP_HIGH_MASK 0xff000000UL - #define CQ_RES_UD_SRC_QP_HIGH_SFT 24 -}; - -/* Responder RawEth and QP1 CQE (32 bytes) */ -struct cq_res_raweth_qp1 { - __le16 length; - #define CQ_RES_RAWETH_QP1_LENGTH_MASK 0x3fffUL - #define CQ_RES_RAWETH_QP1_LENGTH_SFT 0 - #define CQ_RES_RAWETH_QP1_RESERVED2_MASK 0xc000UL - #define CQ_RES_RAWETH_QP1_RESERVED2_SFT 14 - __le16 raweth_qp1_flags; - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ERROR 0x1UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_RESERVED5_1_MASK 0x3eUL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_RESERVED5_1_SFT 1 - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_MASK 0x3c0UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_SFT 6 - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_NOT_KNOWN (0x0UL << 6) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_IP (0x1UL << 6) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_TCP (0x2UL << 6) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_UDP (0x3UL << 6) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_FCOE (0x4UL << 6) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE (0x5UL << 6) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ICMP (0x7UL << 6) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_WO_TIMESTAMP \ - (0x8UL << 6) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP \ - (0x9UL << 6) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_LAST \ - CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_MASK 0x3ffUL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_SFT 0 - #define CQ_RES_RAWETH_QP1_RESERVED6_MASK 0xfc00UL - #define CQ_RES_RAWETH_QP1_RESERVED6_SFT 10 - __le16 raweth_qp1_errors; - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_RESERVED4_MASK 0xfUL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_RESERVED4_SFT 0 - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_IP_CS_ERROR 0x10UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_L4_CS_ERROR 0x20UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_IP_CS_ERROR 0x40UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_L4_CS_ERROR 0x80UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_CRC_ERROR 0x100UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_MASK 0xe00UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_SFT 9 - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_NO_ERROR \ - (0x0UL << 9) - #define \ - CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \ - (0x1UL << 9) - #define \ - CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \ - (0x2UL << 9) - #define \ - CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \ - (0x3UL << 9) - #define \ - CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \ - (0x4UL << 9) - #define \ - CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \ - (0x5UL << 9) - #define \ - CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \ - (0x6UL << 9) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_LAST \ - 
CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_MASK 0xf000UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_SFT 12 - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_NO_ERROR \ - (0x0UL << 12) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_VERSION \ - (0x1UL << 12) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \ - (0x2UL << 12) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_TTL \ - (0x3UL << 12) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \ - (0x4UL << 12) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \ - (0x5UL << 12) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \ - (0x6UL << 12) - #define \ - CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL\ - (0x7UL << 12) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \ - (0x8UL << 12) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_LAST \ - CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN - __le16 raweth_qp1_cfa_code; - __le64 qp_handle; - __le32 raweth_qp1_flags2; - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC 0x1UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC 0x2UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_T_IP_CS_CALC 0x4UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_T_L4_CS_CALC 0x8UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_MASK 0xf0UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_SFT 4 - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_NONE \ - (0x0UL << 4) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN \ - (0x1UL << 4) - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_LAST\ - CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE 0x100UL - __le32 raweth_qp1_metadata; - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK 0xfffUL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_SFT 0 - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_DE 0x1000UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK 0xe000UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT 13 - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK 0xffff0000UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT 16 - u8 cqe_type_toggle; - #define CQ_RES_RAWETH_QP1_TOGGLE 0x1UL - #define CQ_RES_RAWETH_QP1_CQE_TYPE_MASK 0x1eUL - #define CQ_RES_RAWETH_QP1_CQE_TYPE_SFT 1 - #define CQ_RES_RAWETH_QP1_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1) - #define CQ_RES_RAWETH_QP1_RESERVED3_MASK 0xe0UL - #define CQ_RES_RAWETH_QP1_RESERVED3_SFT 5 - u8 status; - #define CQ_RES_RAWETH_QP1_STATUS_OK 0x0UL - #define CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR 0x1UL - #define CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL - #define CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR 0x3UL - #define CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL - #define CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL - #define CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL - #define CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR 0x8UL - __le16 flags; - #define CQ_RES_RAWETH_QP1_FLAGS_SRQ 0x1UL - #define CQ_RES_RAWETH_QP1_FLAGS_SRQ_RQ 0x0UL - #define CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ 0x1UL - #define CQ_RES_RAWETH_QP1_FLAGS_SRQ_LAST \ - CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ - __le32 raweth_qp1_payload_offset_srq_or_rq_wr_id; - #define CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL - 
#define CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_SFT 0 - #define CQ_RES_RAWETH_QP1_RESERVED4_MASK 0xf00000UL - #define CQ_RES_RAWETH_QP1_RESERVED4_SFT 20 - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_PAYLOAD_OFFSET_MASK 0xff000000UL - #define CQ_RES_RAWETH_QP1_RAWETH_QP1_PAYLOAD_OFFSET_SFT 24 -}; - -/* Terminal CQE (32 bytes) */ -struct cq_terminal { - __le64 qp_handle; - __le16 sq_cons_idx; - __le16 rq_cons_idx; - __le32 reserved32_1; - __le64 reserved64_3; - u8 cqe_type_toggle; - #define CQ_TERMINAL_TOGGLE 0x1UL - #define CQ_TERMINAL_CQE_TYPE_MASK 0x1eUL - #define CQ_TERMINAL_CQE_TYPE_SFT 1 - #define CQ_TERMINAL_CQE_TYPE_TERMINAL (0xeUL << 1) - #define CQ_TERMINAL_RESERVED3_MASK 0xe0UL - #define CQ_TERMINAL_RESERVED3_SFT 5 - u8 status; - #define CQ_TERMINAL_STATUS_OK 0x0UL - __le16 reserved16; - __le32 reserved32_2; -}; - -/* Cutoff CQE (32 bytes) */ -struct cq_cutoff { - __le64 reserved64_1; - __le64 reserved64_2; - __le64 reserved64_3; - u8 cqe_type_toggle; - #define CQ_CUTOFF_TOGGLE 0x1UL - #define CQ_CUTOFF_CQE_TYPE_MASK 0x1eUL - #define CQ_CUTOFF_CQE_TYPE_SFT 1 - #define CQ_CUTOFF_CQE_TYPE_CUT_OFF (0xfUL << 1) - #define CQ_CUTOFF_RESERVED3_MASK 0xe0UL - #define CQ_CUTOFF_RESERVED3_SFT 5 - u8 status; - #define CQ_CUTOFF_STATUS_OK 0x0UL - __le16 reserved16; - __le32 reserved32; -}; - -/* Notification Queue (NQ) Structures */ -/* Base NQ Record (16 bytes) */ -struct nq_base { - __le16 info10_type; - #define NQ_BASE_TYPE_MASK 0x3fUL - #define NQ_BASE_TYPE_SFT 0 - #define NQ_BASE_TYPE_CQ_NOTIFICATION 0x30UL - #define NQ_BASE_TYPE_SRQ_EVENT 0x32UL - #define NQ_BASE_TYPE_DBQ_EVENT 0x34UL - #define NQ_BASE_TYPE_QP_EVENT 0x38UL - #define NQ_BASE_TYPE_FUNC_EVENT 0x3aUL - #define NQ_BASE_INFO10_MASK 0xffc0UL - #define NQ_BASE_INFO10_SFT 6 - __le16 info16; - __le32 info32; - __le32 info63_v[2]; - #define NQ_BASE_V 0x1UL - #define NQ_BASE_INFO63_MASK 0xfffffffeUL - #define NQ_BASE_INFO63_SFT 1 -}; - -/* Completion Queue Notification (16 bytes) */ -struct nq_cn { - __le16 type; - #define NQ_CN_TYPE_MASK 0x3fUL - #define NQ_CN_TYPE_SFT 0 - #define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL - #define NQ_CN_RESERVED9_MASK 0xffc0UL - #define NQ_CN_RESERVED9_SFT 6 - __le16 reserved16; - __le32 cq_handle_low; - __le32 v; - #define NQ_CN_V 0x1UL - #define NQ_CN_RESERVED31_MASK 0xfffffffeUL - #define NQ_CN_RESERVED31_SFT 1 - __le32 cq_handle_high; -}; - -/* SRQ Event Notification (16 bytes) */ -struct nq_srq_event { - u8 type; - #define NQ_SRQ_EVENT_TYPE_MASK 0x3fUL - #define NQ_SRQ_EVENT_TYPE_SFT 0 - #define NQ_SRQ_EVENT_TYPE_SRQ_EVENT 0x32UL - #define NQ_SRQ_EVENT_RESERVED1_MASK 0xc0UL - #define NQ_SRQ_EVENT_RESERVED1_SFT 6 - u8 event; - #define NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT 0x1UL - __le16 reserved16; - __le32 srq_handle_low; - __le32 v; - #define NQ_SRQ_EVENT_V 0x1UL - #define NQ_SRQ_EVENT_RESERVED31_MASK 0xfffffffeUL - #define NQ_SRQ_EVENT_RESERVED31_SFT 1 - __le32 srq_handle_high; -}; - -/* DBQ Async Event Notification (16 bytes) */ -struct nq_dbq_event { - u8 type; - #define NQ_DBQ_EVENT_TYPE_MASK 0x3fUL - #define NQ_DBQ_EVENT_TYPE_SFT 0 - #define NQ_DBQ_EVENT_TYPE_DBQ_EVENT 0x34UL - #define NQ_DBQ_EVENT_RESERVED1_MASK 0xc0UL - #define NQ_DBQ_EVENT_RESERVED1_SFT 6 - u8 event; - #define NQ_DBQ_EVENT_EVENT_DBQ_THRESHOLD_EVENT 0x1UL - __le16 db_pfid; - #define NQ_DBQ_EVENT_DB_PFID_MASK 0xfUL - #define NQ_DBQ_EVENT_DB_PFID_SFT 0 - #define NQ_DBQ_EVENT_RESERVED12_MASK 0xfff0UL - #define NQ_DBQ_EVENT_RESERVED12_SFT 4 - __le32 db_dpi; - #define NQ_DBQ_EVENT_DB_DPI_MASK 0xfffffUL - #define NQ_DBQ_EVENT_DB_DPI_SFT 0 - 
#define NQ_DBQ_EVENT_RESERVED12_2_MASK 0xfff00000UL
-	#define NQ_DBQ_EVENT_RESERVED12_2_SFT 20
-	__le32 v;
-	#define NQ_DBQ_EVENT_V 0x1UL
-	#define NQ_DBQ_EVENT_RESERVED32_MASK 0xfffffffeUL
-	#define NQ_DBQ_EVENT_RESERVED32_SFT 1
-	__le32 db_type_db_xid;
-	#define NQ_DBQ_EVENT_DB_XID_MASK 0xfffffUL
-	#define NQ_DBQ_EVENT_DB_XID_SFT 0
-	#define NQ_DBQ_EVENT_RESERVED8_MASK 0xff00000UL
-	#define NQ_DBQ_EVENT_RESERVED8_SFT 20
-	#define NQ_DBQ_EVENT_DB_TYPE_MASK 0xf0000000UL
-	#define NQ_DBQ_EVENT_DB_TYPE_SFT 28
-};
-
-/* Read Request/Response Queue Structures */
-/* Input Read Request Queue (IRRQ) Message (32 bytes) */
-struct xrrq_irrq {
-	__le16 credits_type;
-	#define XRRQ_IRRQ_TYPE 0x1UL
-	#define XRRQ_IRRQ_TYPE_READ_REQ 0x0UL
-	#define XRRQ_IRRQ_TYPE_ATOMIC_REQ 0x1UL
-	#define XRRQ_IRRQ_RESERVED10_MASK 0x7feUL
-	#define XRRQ_IRRQ_RESERVED10_SFT 1
-	#define XRRQ_IRRQ_CREDITS_MASK 0xf800UL
-	#define XRRQ_IRRQ_CREDITS_SFT 11
-	__le16 reserved16;
-	__le32 reserved32;
-	__le32 psn;
-	#define XRRQ_IRRQ_PSN_MASK 0xffffffUL
-	#define XRRQ_IRRQ_PSN_SFT 0
-	#define XRRQ_IRRQ_RESERVED8_1_MASK 0xff000000UL
-	#define XRRQ_IRRQ_RESERVED8_1_SFT 24
-	__le32 msn;
-	#define XRRQ_IRRQ_MSN_MASK 0xffffffUL
-	#define XRRQ_IRRQ_MSN_SFT 0
-	#define XRRQ_IRRQ_RESERVED8_2_MASK 0xff000000UL
-	#define XRRQ_IRRQ_RESERVED8_2_SFT 24
-	__le64 va_or_atomic_result;
-	__le32 rdma_r_key;
-	__le32 length;
-};
-
-/* Output Read Request Queue (ORRQ) Message (32 bytes) */
-struct xrrq_orrq {
-	__le16 num_sges_type;
-	#define XRRQ_ORRQ_TYPE 0x1UL
-	#define XRRQ_ORRQ_TYPE_READ_REQ 0x0UL
-	#define XRRQ_ORRQ_TYPE_ATOMIC_REQ 0x1UL
-	#define XRRQ_ORRQ_RESERVED10_MASK 0x7feUL
-	#define XRRQ_ORRQ_RESERVED10_SFT 1
-	#define XRRQ_ORRQ_NUM_SGES_MASK 0xf800UL
-	#define XRRQ_ORRQ_NUM_SGES_SFT 11
-	__le16 reserved16;
-	__le32 length;
-	__le32 psn;
-	#define XRRQ_ORRQ_PSN_MASK 0xffffffUL
-	#define XRRQ_ORRQ_PSN_SFT 0
-	#define XRRQ_ORRQ_RESERVED8_1_MASK 0xff000000UL
-	#define XRRQ_ORRQ_RESERVED8_1_SFT 24
-	__le32 end_psn;
-	#define XRRQ_ORRQ_END_PSN_MASK 0xffffffUL
-	#define XRRQ_ORRQ_END_PSN_SFT 0
-	#define XRRQ_ORRQ_RESERVED8_2_MASK 0xff000000UL
-	#define XRRQ_ORRQ_RESERVED8_2_SFT 24
-	__le64 first_sge_phy_or_sing_sge_va;
-	__le32 single_sge_l_key;
-	__le32 single_sge_size;
-};
-
-/* Page Buffer List Memory Structures (PBL) */
-/* Page Table Entry (PTE) (8 bytes) */
-struct ptu_pte {
-	__le32 page_next_to_last_last_valid[2];
-	#define PTU_PTE_VALID 0x1UL
-	#define PTU_PTE_LAST 0x2UL
-	#define PTU_PTE_NEXT_TO_LAST 0x4UL
-	#define PTU_PTE_PAGE_MASK 0xfffff000UL
-	#define PTU_PTE_PAGE_SFT 12
+/* creq_destroy_qp_resp (size:128b/16B) */
+struct creq_destroy_qp_resp {
+	u8 type;
+	#define CREQ_DESTROY_QP_RESP_TYPE_MASK 0x3fUL
+	#define CREQ_DESTROY_QP_RESP_TYPE_SFT 0
+	#define CREQ_DESTROY_QP_RESP_TYPE_QP_EVENT 0x38UL
+	#define CREQ_DESTROY_QP_RESP_TYPE_LAST CREQ_DESTROY_QP_RESP_TYPE_QP_EVENT
+	u8 status;
+	__le16 cookie;
+	__le32 xid;
+	u8 v;
+	#define CREQ_DESTROY_QP_RESP_V 0x1UL
+	u8 event;
+	#define CREQ_DESTROY_QP_RESP_EVENT_DESTROY_QP 0x2UL
+	#define CREQ_DESTROY_QP_RESP_EVENT_LAST CREQ_DESTROY_QP_RESP_EVENT_DESTROY_QP
+	u8 reserved48[6];
 };

-/* Page Directory Entry (PDE) (8 bytes) */
-struct ptu_pde {
-	__le32 page_valid[2];
-	#define PTU_PDE_VALID 0x1UL
-	#define PTU_PDE_PAGE_MASK 0xfffff000UL
-	#define PTU_PDE_PAGE_SFT 12
+/* cmdq_modify_qp (size:1024b/128B) */
+struct cmdq_modify_qp {
+	u8 opcode;
+	#define CMDQ_MODIFY_QP_OPCODE_MODIFY_QP 0x3UL
+	#define CMDQ_MODIFY_QP_OPCODE_LAST CMDQ_MODIFY_QP_OPCODE_MODIFY_QP
+	u8 cmd_size;
+	__le16 flags;
+	__le16 cookie;
+	u8 resp_size;
+	u8 reserved8;
+	__le64 resp_addr;
+	__le32 modify_mask;
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_STATE 0x1UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY 0x2UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS 0x4UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_PKEY 0x8UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_QKEY 0x10UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_DGID 0x20UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL 0x40UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX 0x80UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT 0x100UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS 0x200UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC 0x400UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_PINGPONG_PUSH_MODE 0x800UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU 0x1000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT 0x2000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT 0x4000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY 0x8000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN 0x10000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC 0x20000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER 0x40000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN 0x80000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC 0x100000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE 0x200000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE 0x400000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE 0x800000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE 0x1000000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA 0x2000000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID 0x4000000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC 0x8000000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID 0x10000000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_ENABLE_CC 0x20000000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_TOS_ECN 0x40000000UL
+	#define CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP 0x80000000UL
+	__le32 qp_cid;
+	u8 network_type_en_sqd_async_notify_new_state;
+	#define CMDQ_MODIFY_QP_NEW_STATE_MASK 0xfUL
+	#define CMDQ_MODIFY_QP_NEW_STATE_SFT 0
+	#define CMDQ_MODIFY_QP_NEW_STATE_RESET 0x0UL
+	#define CMDQ_MODIFY_QP_NEW_STATE_INIT 0x1UL
+	#define CMDQ_MODIFY_QP_NEW_STATE_RTR 0x2UL
+	#define CMDQ_MODIFY_QP_NEW_STATE_RTS 0x3UL
+	#define CMDQ_MODIFY_QP_NEW_STATE_SQD 0x4UL
+	#define CMDQ_MODIFY_QP_NEW_STATE_SQE 0x5UL
+	#define CMDQ_MODIFY_QP_NEW_STATE_ERR 0x6UL
+	#define CMDQ_MODIFY_QP_NEW_STATE_LAST CMDQ_MODIFY_QP_NEW_STATE_ERR
+	#define CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY 0x10UL
+	#define CMDQ_MODIFY_QP_UNUSED1 0x20UL
+	#define CMDQ_MODIFY_QP_NETWORK_TYPE_MASK 0xc0UL
+	#define CMDQ_MODIFY_QP_NETWORK_TYPE_SFT 6
+	#define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1 (0x0UL << 6)
+	#define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4 (0x2UL << 6)
+	#define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6 (0x3UL << 6)
+	#define CMDQ_MODIFY_QP_NETWORK_TYPE_LAST CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6
+	u8 access;
+	#define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_MASK \
+		0xffUL
+	#define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_SFT \
+		0
+	#define CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE 0x1UL
+	#define CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE 0x2UL
+	#define CMDQ_MODIFY_QP_ACCESS_REMOTE_READ 0x4UL
+	#define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC 0x8UL
+	__le16 pkey;
+	__le32 qkey;
+	__le32 dgid[4];
+	__le32 flow_label;
+	__le16 sgid_index;
+	u8 hop_limit;
+	u8 traffic_class;
+	__le16 dest_mac[3];
+	u8 tos_dscp_tos_ecn;
+	#define CMDQ_MODIFY_QP_TOS_ECN_MASK 0x3UL
+	#define CMDQ_MODIFY_QP_TOS_ECN_SFT 0
+	#define CMDQ_MODIFY_QP_TOS_DSCP_MASK 0xfcUL
+	#define CMDQ_MODIFY_QP_TOS_DSCP_SFT 2
+	u8 path_mtu_pingpong_push_enable;
+	#define CMDQ_MODIFY_QP_PINGPONG_PUSH_ENABLE 0x1UL
+	#define CMDQ_MODIFY_QP_UNUSED3_MASK 0xeUL
+	#define CMDQ_MODIFY_QP_UNUSED3_SFT 1
+	#define CMDQ_MODIFY_QP_PATH_MTU_MASK 0xf0UL
+	#define CMDQ_MODIFY_QP_PATH_MTU_SFT 4
+	#define CMDQ_MODIFY_QP_PATH_MTU_MTU_256 (0x0UL << 4)
+	#define CMDQ_MODIFY_QP_PATH_MTU_MTU_512 (0x1UL << 4)
+	#define CMDQ_MODIFY_QP_PATH_MTU_MTU_1024 (0x2UL << 4)
+	#define CMDQ_MODIFY_QP_PATH_MTU_MTU_2048 (0x3UL << 4)
+	#define CMDQ_MODIFY_QP_PATH_MTU_MTU_4096 (0x4UL << 4)
+	#define CMDQ_MODIFY_QP_PATH_MTU_MTU_8192 (0x5UL << 4)
+	#define CMDQ_MODIFY_QP_PATH_MTU_LAST CMDQ_MODIFY_QP_PATH_MTU_MTU_8192
+	u8 timeout;
+	u8 retry_cnt;
+	u8 rnr_retry;
+	u8 min_rnr_timer;
+	__le32 rq_psn;
+	__le32 sq_psn;
+	u8 max_rd_atomic;
+	u8 max_dest_rd_atomic;
+	__le16 enable_cc;
+	#define CMDQ_MODIFY_QP_ENABLE_CC 0x1UL
+	#define CMDQ_MODIFY_QP_UNUSED15_MASK 0xfffeUL
+	#define CMDQ_MODIFY_QP_UNUSED15_SFT 1
+	__le32 sq_size;
+	__le32 rq_size;
+	__le16 sq_sge;
+	__le16 rq_sge;
+	__le32 max_inline_data;
+	__le32 dest_qp_id;
+	__le32 pingpong_push_dpi;
+	__le16 src_mac[3];
+	__le16 vlan_pcp_vlan_dei_vlan_id;
+	#define CMDQ_MODIFY_QP_VLAN_ID_MASK 0xfffUL
+	#define CMDQ_MODIFY_QP_VLAN_ID_SFT 0
+	#define CMDQ_MODIFY_QP_VLAN_DEI 0x1000UL
+	#define CMDQ_MODIFY_QP_VLAN_PCP_MASK 0xe000UL
+	#define CMDQ_MODIFY_QP_VLAN_PCP_SFT 13
+	__le64 irrq_addr;
+	__le64 orrq_addr;
 };
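The new cmdq_modify_qp layout above packs several attributes into shared bytes, so callers are expected to compose those fields with the MASK/SFT macros rather than plain assignment. A minimal sketch of that composition follows; it is illustrative only, not part of this patch, and the local variable "req" is hypothetical:

	struct cmdq_modify_qp req = {};

	req.opcode = CMDQ_MODIFY_QP_OPCODE_MODIFY_QP;
	/* Tell firmware which of the attributes below carry valid data. */
	req.modify_mask = cpu_to_le32(CMDQ_MODIFY_QP_MODIFY_MASK_STATE |
				      CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU);
	/* New state sits in bits 0-3, network type in bits 6-7 of one byte. */
	req.network_type_en_sqd_async_notify_new_state =
		CMDQ_MODIFY_QP_NEW_STATE_RTS |
		CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
	/* The PATH_MTU_MTU_* values are pre-shifted into the high nibble. */
	req.path_mtu_pingpong_push_enable = CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;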
-/* RoCE Fastpath Host Structures */
-/* Command Queue (CMDQ) Interface */
-/* Init CMDQ (16 bytes) */
-struct cmdq_init {
-	__le64 cmdq_pbl;
-	__le16 cmdq_size_cmdq_lvl;
-	#define CMDQ_INIT_CMDQ_LVL_MASK 0x3UL
-	#define CMDQ_INIT_CMDQ_LVL_SFT 0
-	#define CMDQ_INIT_CMDQ_SIZE_MASK 0xfffcUL
-	#define CMDQ_INIT_CMDQ_SIZE_SFT 2
-	__le16 creq_ring_id;
-	__le32 prod_idx;
+/* creq_modify_qp_resp (size:128b/16B) */
+struct creq_modify_qp_resp {
+	u8 type;
+	#define CREQ_MODIFY_QP_RESP_TYPE_MASK 0x3fUL
+	#define CREQ_MODIFY_QP_RESP_TYPE_SFT 0
+	#define CREQ_MODIFY_QP_RESP_TYPE_QP_EVENT 0x38UL
+	#define CREQ_MODIFY_QP_RESP_TYPE_LAST CREQ_MODIFY_QP_RESP_TYPE_QP_EVENT
+	u8 status;
+	__le16 cookie;
+	__le32 xid;
+	u8 v;
+	#define CREQ_MODIFY_QP_RESP_V 0x1UL
+	u8 event;
+	#define CREQ_MODIFY_QP_RESP_EVENT_MODIFY_QP 0x3UL
+	#define CREQ_MODIFY_QP_RESP_EVENT_LAST CREQ_MODIFY_QP_RESP_EVENT_MODIFY_QP
+	u8 pingpong_push_state_index_enabled;
+	#define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED 0x1UL
+	#define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_INDEX_MASK 0xeUL
+	#define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_INDEX_SFT 1
+	#define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_STATE 0x10UL
+	u8 reserved8;
+	__le32 lag_src_mac;
 };

-/* Update CMDQ producer index (16 bytes) */
-struct cmdq_update {
-	__le64 reserved64;
-	__le32 reserved32;
-	__le32 prod_idx;
+/* cmdq_query_qp (size:192b/24B) */
+struct cmdq_query_qp {
+	u8 opcode;
+	#define CMDQ_QUERY_QP_OPCODE_QUERY_QP 0x4UL
+	#define CMDQ_QUERY_QP_OPCODE_LAST CMDQ_QUERY_QP_OPCODE_QUERY_QP
+	u8 cmd_size;
+	__le16 flags;
+	__le16 cookie;
+	u8 resp_size;
+	u8 reserved8;
+	__le64 resp_addr;
+	__le32 qp_cid;
+	__le32 unused_0;
 };

-/* CMDQ common header structure (16 bytes) */
-struct cmdq_base {
-	u8 opcode;
-	#define CMDQ_BASE_OPCODE_CREATE_QP 0x1UL
-	#define CMDQ_BASE_OPCODE_DESTROY_QP 0x2UL
-	#define CMDQ_BASE_OPCODE_MODIFY_QP 0x3UL
-	#define CMDQ_BASE_OPCODE_QUERY_QP 0x4UL
-	#define CMDQ_BASE_OPCODE_CREATE_SRQ 0x5UL
-	#define CMDQ_BASE_OPCODE_DESTROY_SRQ 0x6UL
-	#define CMDQ_BASE_OPCODE_QUERY_SRQ 0x8UL
-	#define CMDQ_BASE_OPCODE_CREATE_CQ 0x9UL
-	#define CMDQ_BASE_OPCODE_DESTROY_CQ 0xaUL
-	#define CMDQ_BASE_OPCODE_RESIZE_CQ 0xcUL
-	#define CMDQ_BASE_OPCODE_ALLOCATE_MRW 0xdUL
-	#define CMDQ_BASE_OPCODE_DEALLOCATE_KEY 0xeUL
-	#define CMDQ_BASE_OPCODE_REGISTER_MR 0xfUL
-	#define CMDQ_BASE_OPCODE_DEREGISTER_MR 0x10UL
-	#define CMDQ_BASE_OPCODE_ADD_GID 0x11UL
-	#define CMDQ_BASE_OPCODE_DELETE_GID 0x12UL
-	#define CMDQ_BASE_OPCODE_MODIFY_GID 0x17UL
-	#define CMDQ_BASE_OPCODE_QUERY_GID 0x18UL
-	#define CMDQ_BASE_OPCODE_CREATE_QP1 0x13UL
-	#define CMDQ_BASE_OPCODE_DESTROY_QP1 0x14UL
-	#define CMDQ_BASE_OPCODE_CREATE_AH 0x15UL
-	#define CMDQ_BASE_OPCODE_DESTROY_AH 0x16UL
-	#define CMDQ_BASE_OPCODE_INITIALIZE_FW 0x80UL
-	#define CMDQ_BASE_OPCODE_DEINITIALIZE_FW 0x81UL
-	#define CMDQ_BASE_OPCODE_STOP_FUNC 0x82UL
-	#define CMDQ_BASE_OPCODE_QUERY_FUNC 0x83UL
-	#define CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES 0x84UL
-	#define CMDQ_BASE_OPCODE_READ_CONTEXT 0x85UL
-	#define CMDQ_BASE_OPCODE_VF_BACKCHANNEL_REQUEST 0x86UL
-	#define CMDQ_BASE_OPCODE_READ_VF_MEMORY 0x87UL
-	#define CMDQ_BASE_OPCODE_COMPLETE_VF_REQUEST 0x88UL
-	#define CMDQ_BASE_OPCODE_EXTEND_CONTEXT_ARRRAY 0x89UL
-	#define CMDQ_BASE_OPCODE_MAP_TC_TO_COS 0x8aUL
-	#define CMDQ_BASE_OPCODE_QUERY_VERSION 0x8bUL
-	#define CMDQ_BASE_OPCODE_MODIFY_CC 0x8cUL
-	#define CMDQ_BASE_OPCODE_QUERY_CC 0x8dUL
-	#define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS 0x8eUL
-	#define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-};
-
-/* Create QP command (96 bytes) */
-struct cmdq_create_qp {
-	u8 opcode;
-	#define CMDQ_CREATE_QP_OPCODE_CREATE_QP 0x1UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le64 qp_handle;
-	__le32 qp_flags;
-	#define CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED 0x1UL
-	#define CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION 0x2UL
-	#define CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE 0x4UL
-	#define CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED 0x8UL
-	#define CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED 0x10UL
-	#define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED 0x80UL
-	#define CMDQ_CREATE_QP_QP_FLAGS_LAST \
-		CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED
+/* creq_query_qp_resp (size:128b/16B) */
+struct creq_query_qp_resp {
+	u8 type;
+	#define CREQ_QUERY_QP_RESP_TYPE_MASK 0x3fUL
+	#define CREQ_QUERY_QP_RESP_TYPE_SFT 0
+	#define CREQ_QUERY_QP_RESP_TYPE_QP_EVENT 0x38UL
+	#define CREQ_QUERY_QP_RESP_TYPE_LAST CREQ_QUERY_QP_RESP_TYPE_QP_EVENT
+	u8 status;
+	__le16 cookie;
+	__le32 size;
+	u8 v;
+	#define CREQ_QUERY_QP_RESP_V 0x1UL
+	u8 event;
+	#define CREQ_QUERY_QP_RESP_EVENT_QUERY_QP 0x4UL
+	#define CREQ_QUERY_QP_RESP_EVENT_LAST CREQ_QUERY_QP_RESP_EVENT_QUERY_QP
+	u8 reserved48[6];
+};
-	u8 type;
-	#define CMDQ_CREATE_QP_TYPE_RC 0x2UL
-	#define CMDQ_CREATE_QP_TYPE_UD 0x4UL
-	#define CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE 0x6UL
-	#define CMDQ_CREATE_QP_TYPE_GSI 0x7UL
-	u8 sq_pg_size_sq_lvl;
-	#define CMDQ_CREATE_QP_SQ_LVL_MASK 0xfUL
-	#define CMDQ_CREATE_QP_SQ_LVL_SFT 0
-	#define CMDQ_CREATE_QP_SQ_LVL_LVL_0 0x0UL
-	#define CMDQ_CREATE_QP_SQ_LVL_LVL_1 0x1UL
-	#define CMDQ_CREATE_QP_SQ_LVL_LVL_2 0x2UL
-	#define CMDQ_CREATE_QP_SQ_PG_SIZE_MASK 0xf0UL
-	#define CMDQ_CREATE_QP_SQ_PG_SIZE_SFT 4
-	#define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K (0x0UL << 4)
-	#define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K (0x1UL << 4)
-	#define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K (0x2UL << 4)
-	#define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M (0x3UL << 4)
-	#define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M (0x4UL << 4)
-	#define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G (0x5UL << 4)
-	u8 rq_pg_size_rq_lvl;
-	#define CMDQ_CREATE_QP_RQ_LVL_MASK 0xfUL
-	#define CMDQ_CREATE_QP_RQ_LVL_SFT 0
-	#define CMDQ_CREATE_QP_RQ_LVL_LVL_0 0x0UL
-	#define CMDQ_CREATE_QP_RQ_LVL_LVL_1 0x1UL
-	#define CMDQ_CREATE_QP_RQ_LVL_LVL_2 0x2UL
-	#define CMDQ_CREATE_QP_RQ_PG_SIZE_MASK 0xf0UL
-	#define CMDQ_CREATE_QP_RQ_PG_SIZE_SFT 4
-	#define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K (0x0UL << 4)
-	#define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K (0x1UL << 4)
-	#define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K (0x2UL << 4)
-	#define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M (0x3UL << 4)
-	#define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M (0x4UL << 4)
-	#define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G (0x5UL << 4)
-	u8 unused_0;
-	__le32 dpi;
-	__le32 sq_size;
-	__le32 rq_size;
-	__le16 sq_fwo_sq_sge;
-	#define CMDQ_CREATE_QP_SQ_SGE_MASK 0xfUL
-	#define CMDQ_CREATE_QP_SQ_SGE_SFT 0
-	#define CMDQ_CREATE_QP_SQ_FWO_MASK 0xfff0UL
-	#define CMDQ_CREATE_QP_SQ_FWO_SFT 4
-	__le16 rq_fwo_rq_sge;
-	#define CMDQ_CREATE_QP_RQ_SGE_MASK 0xfUL
-	#define CMDQ_CREATE_QP_RQ_SGE_SFT 0
-	#define CMDQ_CREATE_QP_RQ_FWO_MASK 0xfff0UL
-	#define CMDQ_CREATE_QP_RQ_FWO_SFT 4
-	__le32 scq_cid;
-	__le32 rcq_cid;
-	__le32 srq_cid;
-	__le32 pd_id;
-	__le64 sq_pbl;
-	__le64 rq_pbl;
-	__le64 irrq_addr;
-	__le64 orrq_addr;
-};
-
-/* Destroy QP command (24 bytes) */
-struct cmdq_destroy_qp {
-	u8 opcode;
-	#define CMDQ_DESTROY_QP_OPCODE_DESTROY_QP 0x2UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le32 qp_cid;
-	__le32 unused_0;
-};
-
-/* Modify QP command (112 bytes) */
-struct cmdq_modify_qp {
-	u8 opcode;
-	#define CMDQ_MODIFY_QP_OPCODE_MODIFY_QP 0x3UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le32 modify_mask;
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_STATE 0x1UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY 0x2UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS 0x4UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_PKEY 0x8UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_QKEY 0x10UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_DGID 0x20UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL 0x40UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX 0x80UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT 0x100UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS 0x200UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC 0x400UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU 0x1000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT 0x2000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT 0x4000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY 0x8000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN 0x10000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC 0x20000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER 0x40000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN 0x80000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC 0x100000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE 0x200000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE 0x400000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE 0x800000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE 0x1000000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA 0x2000000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID 0x4000000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC 0x8000000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID 0x10000000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_ENABLE_CC 0x20000000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_TOS_ECN 0x40000000UL
-	#define CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP 0x80000000UL
-	__le32 qp_cid;
-	u8 network_type_en_sqd_async_notify_new_state;
-	#define CMDQ_MODIFY_QP_NEW_STATE_MASK 0xfUL
-	#define CMDQ_MODIFY_QP_NEW_STATE_SFT 0
-	#define CMDQ_MODIFY_QP_NEW_STATE_RESET 0x0UL
-	#define CMDQ_MODIFY_QP_NEW_STATE_INIT 0x1UL
-	#define CMDQ_MODIFY_QP_NEW_STATE_RTR 0x2UL
-	#define CMDQ_MODIFY_QP_NEW_STATE_RTS 0x3UL
-	#define CMDQ_MODIFY_QP_NEW_STATE_SQD 0x4UL
-	#define CMDQ_MODIFY_QP_NEW_STATE_SQE 0x5UL
-	#define CMDQ_MODIFY_QP_NEW_STATE_ERR 0x6UL
-	#define CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY 0x10UL
-	#define CMDQ_MODIFY_QP_NETWORK_TYPE_MASK 0xc0UL
-	#define CMDQ_MODIFY_QP_NETWORK_TYPE_SFT 6
-	#define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1 (0x0UL << 6)
-	#define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4 (0x2UL << 6)
-	#define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6 (0x3UL << 6)
-	u8 access;
-	#define CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE 0x1UL
-	#define CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE 0x2UL
-	#define CMDQ_MODIFY_QP_ACCESS_REMOTE_READ 0x4UL
-	#define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC 0x8UL
-	__le16 pkey;
-	__le32 qkey;
-	__le32 dgid[4];
-	__le32 flow_label;
-	__le16 sgid_index;
-	u8 hop_limit;
-	u8 traffic_class;
-	__le16 dest_mac[3];
-	u8 tos_dscp_tos_ecn;
-	#define CMDQ_MODIFY_QP_TOS_ECN_MASK 0x3UL
-	#define CMDQ_MODIFY_QP_TOS_ECN_SFT 0
-	#define CMDQ_MODIFY_QP_TOS_DSCP_MASK 0xfcUL
-	#define CMDQ_MODIFY_QP_TOS_DSCP_SFT 2
-	u8 path_mtu;
-	#define CMDQ_MODIFY_QP_PATH_MTU_MASK 0xf0UL
-	#define CMDQ_MODIFY_QP_PATH_MTU_SFT 4
-	#define CMDQ_MODIFY_QP_PATH_MTU_MTU_256 (0x0UL << 4)
-	#define CMDQ_MODIFY_QP_PATH_MTU_MTU_512 (0x1UL << 4)
-	#define CMDQ_MODIFY_QP_PATH_MTU_MTU_1024 (0x2UL << 4)
-	#define CMDQ_MODIFY_QP_PATH_MTU_MTU_2048 (0x3UL << 4)
-	#define CMDQ_MODIFY_QP_PATH_MTU_MTU_4096 (0x4UL << 4)
-	#define CMDQ_MODIFY_QP_PATH_MTU_MTU_8192 (0x5UL << 4)
-	u8 timeout;
-	u8 retry_cnt;
-	u8 rnr_retry;
-	u8 min_rnr_timer;
-	__le32 rq_psn;
-	__le32 sq_psn;
-	u8 max_rd_atomic;
-	u8 max_dest_rd_atomic;
-	__le16 enable_cc;
-	#define CMDQ_MODIFY_QP_ENABLE_CC 0x1UL
-	__le32 sq_size;
-	__le32 rq_size;
-	__le16 sq_sge;
-	__le16 rq_sge;
-	__le32 max_inline_data;
-	__le32 dest_qp_id;
-	__le32 unused_3;
-	__le16 src_mac[3];
-	__le16 vlan_pcp_vlan_dei_vlan_id;
-	#define CMDQ_MODIFY_QP_VLAN_ID_MASK 0xfffUL
-	#define CMDQ_MODIFY_QP_VLAN_ID_SFT 0
-	#define CMDQ_MODIFY_QP_VLAN_DEI 0x1000UL
-	#define CMDQ_MODIFY_QP_VLAN_PCP_MASK 0xe000UL
-	#define CMDQ_MODIFY_QP_VLAN_PCP_SFT 13
-};
-
-/* Query QP command (24 bytes) */
-struct cmdq_query_qp {
-	u8 opcode;
-	#define CMDQ_QUERY_QP_OPCODE_QUERY_QP 0x4UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le32 qp_cid;
-	__le32 unused_0;
-};
-
-/* Create SRQ command (48 bytes) */
-struct cmdq_create_srq {
-	u8 opcode;
-	#define CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ 0x5UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le64 srq_handle;
-	__le16 pg_size_lvl;
-	#define CMDQ_CREATE_SRQ_LVL_MASK 0x3UL
-	#define CMDQ_CREATE_SRQ_LVL_SFT 0
-	#define CMDQ_CREATE_SRQ_LVL_LVL_0 0x0UL
-	#define CMDQ_CREATE_SRQ_LVL_LVL_1 0x1UL
-	#define CMDQ_CREATE_SRQ_LVL_LVL_2 0x2UL
-	#define CMDQ_CREATE_SRQ_PG_SIZE_MASK 0x1cUL
-	#define CMDQ_CREATE_SRQ_PG_SIZE_SFT 2
-	#define CMDQ_CREATE_SRQ_PG_SIZE_PG_4K (0x0UL << 2)
-	#define CMDQ_CREATE_SRQ_PG_SIZE_PG_8K (0x1UL << 2)
-	#define CMDQ_CREATE_SRQ_PG_SIZE_PG_64K (0x2UL << 2)
-	#define CMDQ_CREATE_SRQ_PG_SIZE_PG_2M (0x3UL << 2)
-	#define CMDQ_CREATE_SRQ_PG_SIZE_PG_8M (0x4UL << 2)
-	#define CMDQ_CREATE_SRQ_PG_SIZE_PG_1G (0x5UL << 2)
-	__le16 eventq_id;
-	#define CMDQ_CREATE_SRQ_EVENTQ_ID_MASK 0xfffUL
-	#define CMDQ_CREATE_SRQ_EVENTQ_ID_SFT 0
-	__le16 srq_size;
-	__le16 srq_fwo;
-	__le32 dpi;
-	__le32 pd_id;
-	__le64 pbl;
-};
-
-/* Destroy SRQ command (24 bytes) */
-struct cmdq_destroy_srq {
-	u8 opcode;
-	#define CMDQ_DESTROY_SRQ_OPCODE_DESTROY_SRQ 0x6UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le32 srq_cid;
-	__le32 unused_0;
-};
-
-/* Query SRQ command (24 bytes) */
-struct cmdq_query_srq {
-	u8 opcode;
-	#define CMDQ_QUERY_SRQ_OPCODE_QUERY_SRQ 0x8UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le32 srq_cid;
-	__le32 unused_0;
-};
-
-/* Create CQ command (48 bytes) */
-struct cmdq_create_cq {
-	u8 opcode;
-	#define CMDQ_CREATE_CQ_OPCODE_CREATE_CQ 0x9UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le64 cq_handle;
-	__le32 pg_size_lvl;
-	#define CMDQ_CREATE_CQ_LVL_MASK 0x3UL
-	#define CMDQ_CREATE_CQ_LVL_SFT 0
-	#define CMDQ_CREATE_CQ_LVL_LVL_0 0x0UL
-	#define CMDQ_CREATE_CQ_LVL_LVL_1 0x1UL
-	#define CMDQ_CREATE_CQ_LVL_LVL_2 0x2UL
-	#define CMDQ_CREATE_CQ_PG_SIZE_MASK 0x1cUL
-	#define CMDQ_CREATE_CQ_PG_SIZE_SFT 2
-	#define CMDQ_CREATE_CQ_PG_SIZE_PG_4K (0x0UL << 2)
-	#define CMDQ_CREATE_CQ_PG_SIZE_PG_8K (0x1UL << 2)
-	#define CMDQ_CREATE_CQ_PG_SIZE_PG_64K (0x2UL << 2)
-	#define CMDQ_CREATE_CQ_PG_SIZE_PG_2M (0x3UL << 2)
-	#define CMDQ_CREATE_CQ_PG_SIZE_PG_8M (0x4UL << 2)
-	#define CMDQ_CREATE_CQ_PG_SIZE_PG_1G (0x5UL << 2)
-	__le32 cq_fco_cnq_id;
-	#define CMDQ_CREATE_CQ_CNQ_ID_MASK 0xfffUL
-	#define CMDQ_CREATE_CQ_CNQ_ID_SFT 0
-	#define CMDQ_CREATE_CQ_CQ_FCO_MASK 0xfffff000UL
-	#define CMDQ_CREATE_CQ_CQ_FCO_SFT 12
-	__le32 dpi;
-	__le32 cq_size;
-	__le64 pbl;
-};
-
-/* Destroy CQ command (24 bytes) */
-struct cmdq_destroy_cq {
-	u8 opcode;
-	#define CMDQ_DESTROY_CQ_OPCODE_DESTROY_CQ 0xaUL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le32 cq_cid;
-	__le32 unused_0;
-};
-
-/* Resize CQ command (40 bytes) */
-struct cmdq_resize_cq {
-	u8 opcode;
-	#define CMDQ_RESIZE_CQ_OPCODE_RESIZE_CQ 0xcUL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le32 cq_cid;
-	__le32 new_cq_size_pg_size_lvl;
-	#define CMDQ_RESIZE_CQ_LVL_MASK 0x3UL
-	#define CMDQ_RESIZE_CQ_LVL_SFT 0
-	#define CMDQ_RESIZE_CQ_LVL_LVL_0 0x0UL
-	#define CMDQ_RESIZE_CQ_LVL_LVL_1 0x1UL
-	#define CMDQ_RESIZE_CQ_LVL_LVL_2 0x2UL
-	#define CMDQ_RESIZE_CQ_PG_SIZE_MASK 0x1cUL
-	#define CMDQ_RESIZE_CQ_PG_SIZE_SFT 2
-	#define CMDQ_RESIZE_CQ_PG_SIZE_PG_4K (0x0UL << 2)
-	#define CMDQ_RESIZE_CQ_PG_SIZE_PG_8K (0x1UL << 2)
-	#define CMDQ_RESIZE_CQ_PG_SIZE_PG_64K (0x2UL << 2)
-	#define CMDQ_RESIZE_CQ_PG_SIZE_PG_2M (0x3UL << 2)
-	#define CMDQ_RESIZE_CQ_PG_SIZE_PG_8M (0x4UL << 2)
-	#define CMDQ_RESIZE_CQ_PG_SIZE_PG_1G (0x5UL << 2)
-	#define CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK 0x1fffe0UL
-	#define CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT 5
-	__le64 new_pbl;
-	__le32 new_cq_fco;
-	__le32 unused_2;
-};
-
-/* Allocate MRW command (32 bytes) */
-struct cmdq_allocate_mrw {
-	u8 opcode;
-	#define CMDQ_ALLOCATE_MRW_OPCODE_ALLOCATE_MRW 0xdUL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le64 mrw_handle;
-	u8 mrw_flags;
-	#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MASK 0xfUL
-	#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_SFT 0
-	#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR 0x0UL
-	#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR 0x1UL
-	#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 0x2UL
-	#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A 0x3UL
-	#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B 0x4UL
-	u8 access;
-	#define CMDQ_ALLOCATE_MRW_ACCESS_RESERVED_MASK 0x1fUL
-	#define CMDQ_ALLOCATE_MRW_ACCESS_RESERVED_SFT 0
-	#define CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY 0x20UL
-	__le16 unused_1;
-	__le32 pd_id;
-};
-
-/* De-allocate key command (24 bytes) */
-struct cmdq_deallocate_key {
-	u8 opcode;
-	#define CMDQ_DEALLOCATE_KEY_OPCODE_DEALLOCATE_KEY 0xeUL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	u8 mrw_flags;
-	#define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MASK 0xfUL
-	#define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_SFT 0
-	#define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MR 0x0UL
-	#define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_PMR 0x1UL
-	#define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE1 0x2UL
-	#define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2A 0x3UL
-	#define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2B 0x4UL
-	u8 unused_1[3];
-	__le32 key;
-};
-
-/* Register MR command (48 bytes) */
-struct cmdq_register_mr {
-	u8 opcode;
-	#define CMDQ_REGISTER_MR_OPCODE_REGISTER_MR 0xfUL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	u8 log2_pg_size_lvl;
-	#define CMDQ_REGISTER_MR_LVL_MASK 0x3UL
-	#define CMDQ_REGISTER_MR_LVL_SFT 0
-	#define CMDQ_REGISTER_MR_LVL_LVL_0 0x0UL
-	#define CMDQ_REGISTER_MR_LVL_LVL_1 0x1UL
-	#define CMDQ_REGISTER_MR_LVL_LVL_2 0x2UL
-	#define CMDQ_REGISTER_MR_LVL_LAST CMDQ_REGISTER_MR_LVL_LVL_2
-	#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK 0x7cUL
-	#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT 2
-	#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4K (0xcUL << 2)
-	#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_8K (0xdUL << 2)
-	#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_64K (0x10UL << 2)
-	#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_256K (0x12UL << 2)
-	#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1M (0x14UL << 2)
-	#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_2M (0x15UL << 2)
-	#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4M (0x16UL << 2)
-	#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G (0x1eUL << 2)
-	#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_LAST \
-		CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G
-	#define CMDQ_REGISTER_MR_UNUSED1 0x80UL
-	u8 access;
-	#define CMDQ_REGISTER_MR_ACCESS_LOCAL_WRITE 0x1UL
-	#define CMDQ_REGISTER_MR_ACCESS_REMOTE_READ 0x2UL
-	#define CMDQ_REGISTER_MR_ACCESS_REMOTE_WRITE 0x4UL
-	#define CMDQ_REGISTER_MR_ACCESS_REMOTE_ATOMIC 0x8UL
-	#define CMDQ_REGISTER_MR_ACCESS_MW_BIND 0x10UL
-	#define CMDQ_REGISTER_MR_ACCESS_ZERO_BASED 0x20UL
-	__le16 log2_pbl_pg_size;
-	#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK 0x1fUL
-	#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT 0
-	#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K 0xcUL
-	#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K 0xdUL
-	#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K 0x10UL
-	#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K 0x12UL
-	#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M 0x14UL
-	#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M 0x15UL
-	#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M 0x16UL
-	#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G 0x1eUL
-	#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_LAST \
-		CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G
-	#define CMDQ_REGISTER_MR_UNUSED11_MASK 0xffe0UL
-	#define CMDQ_REGISTER_MR_UNUSED11_SFT 5
-	__le32 key;
-	__le64 pbl;
-	__le64 va;
-	__le64 mr_size;
+/* creq_query_qp_resp_sb (size:832b/104B) */
+struct creq_query_qp_resp_sb {
+	u8 opcode;
+	#define CREQ_QUERY_QP_RESP_SB_OPCODE_QUERY_QP 0x4UL
+	#define CREQ_QUERY_QP_RESP_SB_OPCODE_LAST CREQ_QUERY_QP_RESP_SB_OPCODE_QUERY_QP
+	u8 status;
+	__le16 cookie;
+	__le16 flags;
+	u8 resp_size;
+	u8 reserved8;
+	__le32 xid;
+	u8 en_sqd_async_notify_state;
+	#define CREQ_QUERY_QP_RESP_SB_STATE_MASK 0xfUL
+	#define CREQ_QUERY_QP_RESP_SB_STATE_SFT 0
+	#define CREQ_QUERY_QP_RESP_SB_STATE_RESET 0x0UL
+	#define CREQ_QUERY_QP_RESP_SB_STATE_INIT 0x1UL
+	#define CREQ_QUERY_QP_RESP_SB_STATE_RTR 0x2UL
+	#define CREQ_QUERY_QP_RESP_SB_STATE_RTS 0x3UL
+	#define CREQ_QUERY_QP_RESP_SB_STATE_SQD 0x4UL
+	#define CREQ_QUERY_QP_RESP_SB_STATE_SQE 0x5UL
+	#define CREQ_QUERY_QP_RESP_SB_STATE_ERR 0x6UL
+	#define CREQ_QUERY_QP_RESP_SB_STATE_LAST CREQ_QUERY_QP_RESP_SB_STATE_ERR
+	#define CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY 0x10UL
+	#define CREQ_QUERY_QP_RESP_SB_UNUSED3_MASK 0xe0UL
+	#define CREQ_QUERY_QP_RESP_SB_UNUSED3_SFT 5
+	u8 access;
+	#define \
+	CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_MASK\
+		0xffUL
+	#define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_SFT\
+		0
+	#define CREQ_QUERY_QP_RESP_SB_ACCESS_LOCAL_WRITE 0x1UL
+	#define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_WRITE 0x2UL
+	#define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_READ 0x4UL
+	#define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC 0x8UL
+	__le16 pkey;
+	__le32 qkey;
+	__le32 reserved32;
+	__le32 dgid[4];
+	__le32 flow_label;
+	__le16 sgid_index;
+	u8 hop_limit;
+	u8 traffic_class;
+	__le16 dest_mac[3];
+	__le16 path_mtu_dest_vlan_id;
+	#define CREQ_QUERY_QP_RESP_SB_DEST_VLAN_ID_MASK 0xfffUL
+	#define CREQ_QUERY_QP_RESP_SB_DEST_VLAN_ID_SFT 0
+	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK 0xf000UL
+	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT 12
+	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_256 (0x0UL << 12)
+	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_512 (0x1UL << 12)
+	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_1024 (0x2UL << 12)
+	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_2048 (0x3UL << 12)
+	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_4096 (0x4UL << 12)
+	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_8192 (0x5UL << 12)
+	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_LAST CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_8192
+	u8 timeout;
+	u8 retry_cnt;
+	u8 rnr_retry;
+	u8 min_rnr_timer;
+	__le32 rq_psn;
+	__le32 sq_psn;
+	u8 max_rd_atomic;
+	u8 max_dest_rd_atomic;
+	u8 tos_dscp_tos_ecn;
+	#define CREQ_QUERY_QP_RESP_SB_TOS_ECN_MASK 0x3UL
+	#define CREQ_QUERY_QP_RESP_SB_TOS_ECN_SFT 0
+	#define CREQ_QUERY_QP_RESP_SB_TOS_DSCP_MASK 0xfcUL
+	#define CREQ_QUERY_QP_RESP_SB_TOS_DSCP_SFT 2
+	u8 enable_cc;
+	#define CREQ_QUERY_QP_RESP_SB_ENABLE_CC 0x1UL
+	__le32 sq_size;
+	__le32 rq_size;
+	__le16 sq_sge;
+	__le16 rq_sge;
+	__le32 max_inline_data;
+	__le32 dest_qp_id;
+	__le16 port_id;
+	u8 unused_0;
+	u8 stat_collection_id;
+	__le16 src_mac[3];
+	__le16 vlan_pcp_vlan_dei_vlan_id;
+	#define CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK 0xfffUL
+	#define CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT 0
+	#define CREQ_QUERY_QP_RESP_SB_VLAN_DEI 0x1000UL
+	#define CREQ_QUERY_QP_RESP_SB_VLAN_PCP_MASK 0xe000UL
+	#define CREQ_QUERY_QP_RESP_SB_VLAN_PCP_SFT 13
 };
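Because the QUERY_QP side buffer above reports path MTU and destination VLAN in one little-endian word, consumers are expected to decode it with the MASK/SFT pairs it defines. A minimal sketch follows; it is illustrative only, not part of this patch, and "sb" is a hypothetical pointer to a firmware-filled struct creq_query_qp_resp_sb:

	u16 mtu_vlan = le16_to_cpu(sb->path_mtu_dest_vlan_id);
	u16 dest_vlan = (mtu_vlan & CREQ_QUERY_QP_RESP_SB_DEST_VLAN_ID_MASK) >>
			CREQ_QUERY_QP_RESP_SB_DEST_VLAN_ID_SFT;
	u16 path_mtu = (mtu_vlan & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
		       CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	/* path_mtu is now the raw 0-5 enumeration (0 = 256B ... 5 = 8192B),
	 * i.e. the ..._PATH_MTU_MTU_* codes shifted back down by 12.
	 */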
-/* Deregister MR command (24 bytes) */
-struct cmdq_deregister_mr {
-	u8 opcode;
-	#define CMDQ_DEREGISTER_MR_OPCODE_DEREGISTER_MR 0x10UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le32 lkey;
-	__le32 unused_0;
-};
-
-/* Add GID command (48 bytes) */
-struct cmdq_add_gid {
-	u8 opcode;
-	#define CMDQ_ADD_GID_OPCODE_ADD_GID 0x11UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__be32 gid[4];
-	__be16 src_mac[3];
-	__le16 vlan;
-	#define CMDQ_ADD_GID_VLAN_VLAN_ID_MASK 0xfffUL
-	#define CMDQ_ADD_GID_VLAN_VLAN_ID_SFT 0
-	#define CMDQ_ADD_GID_VLAN_TPID_MASK 0x7000UL
-	#define CMDQ_ADD_GID_VLAN_TPID_SFT 12
-	#define CMDQ_ADD_GID_VLAN_TPID_TPID_88A8 (0x0UL << 12)
-	#define CMDQ_ADD_GID_VLAN_TPID_TPID_8100 (0x1UL << 12)
-	#define CMDQ_ADD_GID_VLAN_TPID_TPID_9100 (0x2UL << 12)
-	#define CMDQ_ADD_GID_VLAN_TPID_TPID_9200 (0x3UL << 12)
-	#define CMDQ_ADD_GID_VLAN_TPID_TPID_9300 (0x4UL << 12)
-	#define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG1 (0x5UL << 12)
-	#define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG2 (0x6UL << 12)
-	#define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG3 (0x7UL << 12)
-	#define CMDQ_ADD_GID_VLAN_TPID_LAST CMDQ_ADD_GID_VLAN_TPID_TPID_CFG3
-	#define CMDQ_ADD_GID_VLAN_VLAN_EN 0x8000UL
-	__le16 ipid;
-	__le16 stats_ctx;
-	#define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_ID_MASK 0x7fffUL
-	#define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_ID_SFT 0
-	#define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID 0x8000UL
-	__le32 unused_0;
-};
-
-/* Delete GID command (24 bytes) */
-struct cmdq_delete_gid {
-	u8 opcode;
-	#define CMDQ_DELETE_GID_OPCODE_DELETE_GID 0x12UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le16 gid_index;
-	__le16 unused_0;
-	__le32 unused_1;
-};
-
-/* Modify GID command (48 bytes) */
-struct cmdq_modify_gid {
-	u8 opcode;
-	#define CMDQ_MODIFY_GID_OPCODE_MODIFY_GID 0x17UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__be32 gid[4];
-	__be16 src_mac[3];
-	__le16 vlan;
-	#define CMDQ_MODIFY_GID_VLAN_VLAN_ID_MASK 0xfffUL
-	#define CMDQ_MODIFY_GID_VLAN_VLAN_ID_SFT 0
-	#define CMDQ_MODIFY_GID_VLAN_TPID_MASK 0x7000UL
-	#define CMDQ_MODIFY_GID_VLAN_TPID_SFT 12
-	#define CMDQ_MODIFY_GID_VLAN_TPID_TPID_88A8 (0x0UL << 12)
-	#define CMDQ_MODIFY_GID_VLAN_TPID_TPID_8100 (0x1UL << 12)
-	#define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9100 (0x2UL << 12)
-	#define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9200 (0x3UL << 12)
-	#define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9300 (0x4UL << 12)
-	#define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG1 (0x5UL << 12)
-	#define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG2 (0x6UL << 12)
-	#define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG3 (0x7UL << 12)
-	#define CMDQ_MODIFY_GID_VLAN_TPID_LAST \
-		CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG3
-	#define CMDQ_MODIFY_GID_VLAN_VLAN_EN 0x8000UL
-	__le16 ipid;
-	__le16 gid_index;
-	__le16 stats_ctx;
-	#define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_ID_MASK 0x7fffUL
-	#define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_ID_SFT 0
-	#define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_VALID 0x8000UL
-	__le16 unused_0;
-};
-
-/* Query GID command (24 bytes) */
-struct cmdq_query_gid {
-	u8 opcode;
-	#define CMDQ_QUERY_GID_OPCODE_QUERY_GID 0x18UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le16 gid_index;
-	__le16 unused_0;
-	__le32 unused_1;
-};
-
-/* Create QP1 command (80 bytes) */
-struct cmdq_create_qp1 {
-	u8 opcode;
-	#define CMDQ_CREATE_QP1_OPCODE_CREATE_QP1 0x13UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le64 qp_handle;
-	__le32 qp_flags;
-	#define CMDQ_CREATE_QP1_QP_FLAGS_SRQ_USED 0x1UL
-	#define CMDQ_CREATE_QP1_QP_FLAGS_FORCE_COMPLETION 0x2UL
-	#define CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE 0x4UL
-	u8 type;
-	#define CMDQ_CREATE_QP1_TYPE_GSI 0x1UL
-	u8 sq_pg_size_sq_lvl;
-	#define CMDQ_CREATE_QP1_SQ_LVL_MASK 0xfUL
-	#define CMDQ_CREATE_QP1_SQ_LVL_SFT 0
-	#define CMDQ_CREATE_QP1_SQ_LVL_LVL_0 0x0UL
-	#define CMDQ_CREATE_QP1_SQ_LVL_LVL_1 0x1UL
-	#define CMDQ_CREATE_QP1_SQ_LVL_LVL_2 0x2UL
-	#define CMDQ_CREATE_QP1_SQ_PG_SIZE_MASK 0xf0UL
-	#define CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT 4
-	#define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K (0x0UL << 4)
-	#define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K (0x1UL << 4)
-	#define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K (0x2UL << 4)
-	#define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M (0x3UL << 4)
-	#define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M (0x4UL << 4)
-	#define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G (0x5UL << 4)
-	u8 rq_pg_size_rq_lvl;
-	#define CMDQ_CREATE_QP1_RQ_LVL_MASK 0xfUL
-	#define CMDQ_CREATE_QP1_RQ_LVL_SFT 0
-	#define CMDQ_CREATE_QP1_RQ_LVL_LVL_0 0x0UL
-	#define CMDQ_CREATE_QP1_RQ_LVL_LVL_1 0x1UL
-	#define CMDQ_CREATE_QP1_RQ_LVL_LVL_2 0x2UL
-	#define CMDQ_CREATE_QP1_RQ_PG_SIZE_MASK 0xf0UL
-	#define CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT 4
-	#define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K (0x0UL << 4)
-	#define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K (0x1UL << 4)
-	#define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K (0x2UL << 4)
-	#define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M (0x3UL << 4)
-	#define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M (0x4UL << 4)
-	#define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G (0x5UL << 4)
-	u8 unused_0;
-	__le32 dpi;
-	__le32 sq_size;
-	__le32 rq_size;
-	__le16 sq_fwo_sq_sge;
-	#define CMDQ_CREATE_QP1_SQ_SGE_MASK 0xfUL
-	#define CMDQ_CREATE_QP1_SQ_SGE_SFT 0
-	#define CMDQ_CREATE_QP1_SQ_FWO_MASK 0xfff0UL
-	#define CMDQ_CREATE_QP1_SQ_FWO_SFT 4
-	__le16 rq_fwo_rq_sge;
-	#define CMDQ_CREATE_QP1_RQ_SGE_MASK 0xfUL
-	#define CMDQ_CREATE_QP1_RQ_SGE_SFT 0
-	#define CMDQ_CREATE_QP1_RQ_FWO_MASK 0xfff0UL
-	#define CMDQ_CREATE_QP1_RQ_FWO_SFT 4
-	__le32 scq_cid;
-	__le32 rcq_cid;
-	__le32 srq_cid;
-	__le32 pd_id;
-	__le64 sq_pbl;
-	__le64 rq_pbl;
-};
-
-/* Destroy QP1 command (24 bytes) */
-struct cmdq_destroy_qp1 {
-	u8 opcode;
-	#define CMDQ_DESTROY_QP1_OPCODE_DESTROY_QP1 0x14UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le32 qp1_cid;
-	__le32 unused_0;
-};
-
-/* Create AH command (64 bytes) */
-struct cmdq_create_ah {
-	u8 opcode;
-	#define CMDQ_CREATE_AH_OPCODE_CREATE_AH 0x15UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le64 ah_handle;
-	__le32 dgid[4];
-	u8 type;
-	#define CMDQ_CREATE_AH_TYPE_V1 0x0UL
-	#define CMDQ_CREATE_AH_TYPE_V2IPV4 0x2UL
-	#define CMDQ_CREATE_AH_TYPE_V2IPV6 0x3UL
-	u8 hop_limit;
-	__le16 sgid_index;
-	__le32 dest_vlan_id_flow_label;
-	#define CMDQ_CREATE_AH_FLOW_LABEL_MASK 0xfffffUL
-	#define CMDQ_CREATE_AH_FLOW_LABEL_SFT 0
-	#define CMDQ_CREATE_AH_DEST_VLAN_ID_MASK 0xfff00000UL
-	#define CMDQ_CREATE_AH_DEST_VLAN_ID_SFT 20
-	__le32 pd_id;
-	__le32 unused_0;
-	__le16 dest_mac[3];
-	u8 traffic_class;
-	u8 unused_1;
-};
-
-/* Destroy AH command (24 bytes) */
-struct cmdq_destroy_ah {
-	u8 opcode;
-	#define CMDQ_DESTROY_AH_OPCODE_DESTROY_AH 0x16UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le32 ah_cid;
-	__le32 unused_0;
-};
-
-/* Initialize Firmware command (112 bytes) */
-struct cmdq_initialize_fw {
-	u8 opcode;
-	#define CMDQ_INITIALIZE_FW_OPCODE_INITIALIZE_FW 0x80UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	u8 qpc_pg_size_qpc_lvl;
-	#define CMDQ_INITIALIZE_FW_QPC_LVL_MASK 0xfUL
-	#define CMDQ_INITIALIZE_FW_QPC_LVL_SFT 0
-	#define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_0 0x0UL
-	#define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_1 0x1UL
-	#define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_2 0x2UL
-	#define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_MASK 0xf0UL
-	#define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT 4
-	#define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K (0x0UL << 4)
-	#define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K (0x1UL << 4)
-	#define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K (0x2UL << 4)
-	#define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M (0x3UL << 4)
-	#define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M (0x4UL << 4)
-	#define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G (0x5UL << 4)
-	u8 mrw_pg_size_mrw_lvl;
-	#define CMDQ_INITIALIZE_FW_MRW_LVL_MASK 0xfUL
-	#define CMDQ_INITIALIZE_FW_MRW_LVL_SFT 0
-	#define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_0 0x0UL
-	#define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_1 0x1UL
-	#define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_2 0x2UL
-	#define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_MASK 0xf0UL
-	#define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_SFT 4
-	#define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_4K (0x0UL << 4)
-	#define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_8K (0x1UL << 4)
-	#define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_64K (0x2UL << 4)
-	#define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_2M (0x3UL << 4)
-	#define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_8M (0x4UL << 4)
-	#define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_1G (0x5UL << 4)
-	u8 srq_pg_size_srq_lvl;
-	#define CMDQ_INITIALIZE_FW_SRQ_LVL_MASK 0xfUL
-	#define CMDQ_INITIALIZE_FW_SRQ_LVL_SFT 0
-	#define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_0 0x0UL
-	#define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_1 0x1UL
-	#define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_2 0x2UL
-	#define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_MASK 0xf0UL
-	#define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_SFT 4
-	#define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
-	#define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
-	#define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
-	#define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
-	#define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
-	#define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
-	u8 cq_pg_size_cq_lvl;
-	#define CMDQ_INITIALIZE_FW_CQ_LVL_MASK 0xfUL
-	#define CMDQ_INITIALIZE_FW_CQ_LVL_SFT 0
-	#define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_0 0x0UL
-	#define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_1 0x1UL
-	#define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_2 0x2UL
-	#define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_MASK 0xf0UL
-	#define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_SFT 4
-	#define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_4K (0x0UL << 4)
-	#define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_8K (0x1UL << 4)
-	#define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_64K (0x2UL << 4)
-	#define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_2M (0x3UL << 4)
-	#define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_8M (0x4UL << 4)
-	#define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_1G (0x5UL << 4)
-	u8 tqm_pg_size_tqm_lvl;
-	#define CMDQ_INITIALIZE_FW_TQM_LVL_MASK 0xfUL
-	#define CMDQ_INITIALIZE_FW_TQM_LVL_SFT 0
-	#define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_0 0x0UL
-	#define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_1 0x1UL
-	#define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_2 0x2UL
-	#define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_MASK 0xf0UL
-	#define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_SFT 4
-	#define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_4K (0x0UL << 4)
-	#define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_8K (0x1UL << 4)
-	#define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_64K (0x2UL << 4)
-	#define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_2M (0x3UL << 4)
-	#define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_8M (0x4UL << 4)
-	#define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_1G (0x5UL << 4)
-	u8 tim_pg_size_tim_lvl;
-	#define CMDQ_INITIALIZE_FW_TIM_LVL_MASK 0xfUL
-	#define CMDQ_INITIALIZE_FW_TIM_LVL_SFT 0
-	#define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_0 0x0UL
-	#define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_1 0x1UL
-	#define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_2 0x2UL
-	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_MASK 0xf0UL
-	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_SFT 4
-	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_4K (0x0UL << 4)
-	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8K (0x1UL << 4)
-	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_64K (0x2UL << 4)
-	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4)
-	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4)
-	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4)
-	/* This value is (log-base-2-of-DBR-page-size - 12).
-	 * 0 for 4KB. HW supported values are enumerated below.
-	 */
-	__le16 log2_dbr_pg_size;
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL
-	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \
-		CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
-	__le64 qpc_page_dir;
-	__le64 mrw_page_dir;
-	__le64 srq_page_dir;
-	__le64 cq_page_dir;
-	__le64 tqm_page_dir;
-	__le64 tim_page_dir;
-	__le32 number_of_qp;
-	__le32 number_of_mrw;
-	__le32 number_of_srq;
-	__le32 number_of_cq;
-	__le32 max_qp_per_vf;
-	__le32 max_mrw_per_vf;
-	__le32 max_srq_per_vf;
-	__le32 max_cq_per_vf;
-	__le32 max_gid_per_vf;
-	__le32 stat_ctx_id;
-};
-
-/* De-initialize Firmware command (16 bytes) */
-struct cmdq_deinitialize_fw {
-	u8 opcode;
-	#define CMDQ_DEINITIALIZE_FW_OPCODE_DEINITIALIZE_FW 0x81UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-};
-
-/* Stop function command (16 bytes) */
-struct cmdq_stop_func {
-	u8 opcode;
-	#define CMDQ_STOP_FUNC_OPCODE_STOP_FUNC 0x82UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-};
-
-/* Query function command (16 bytes) */
-struct cmdq_query_func {
-	u8 opcode;
-	#define CMDQ_QUERY_FUNC_OPCODE_QUERY_FUNC 0x83UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
+/* cmdq_query_qp_extend (size:192b/24B) */
+struct cmdq_query_qp_extend {
+	u8 opcode;
+	#define CMDQ_QUERY_QP_EXTEND_OPCODE_QUERY_QP_EXTEND 0x91UL
+	#define CMDQ_QUERY_QP_EXTEND_OPCODE_LAST CMDQ_QUERY_QP_EXTEND_OPCODE_QUERY_QP_EXTEND
+	u8 cmd_size;
+	__le16 flags;
+	__le16 cookie;
+	u8 resp_size;
+	u8 num_qps;
+	__le64 resp_addr;
+	__le32 function_id;
+	#define CMDQ_QUERY_QP_EXTEND_PF_NUM_MASK 0xffUL
+	#define CMDQ_QUERY_QP_EXTEND_PF_NUM_SFT 0
+	#define CMDQ_QUERY_QP_EXTEND_VF_NUM_MASK 0xffff00UL
+	#define CMDQ_QUERY_QP_EXTEND_VF_NUM_SFT 8
+	#define CMDQ_QUERY_QP_EXTEND_VF_VALID 0x1000000UL
+	__le32 current_index;
 };

-/* Set function resources command (16 bytes) */
-struct cmdq_set_func_resources {
-	u8 opcode;
-	#define CMDQ_SET_FUNC_RESOURCES_OPCODE_SET_FUNC_RESOURCES 0x84UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le32 number_of_qp;
-	__le32 number_of_mrw;
-	__le32 number_of_srq;
-	__le32 number_of_cq;
-	__le32 max_qp_per_vf;
-	__le32 max_mrw_per_vf;
-	__le32 max_srq_per_vf;
-	__le32 max_cq_per_vf;
-	__le32 max_gid_per_vf;
-	__le32 stat_ctx_id;
-};
-
-/* Read hardware resource context command (24 bytes) */
-struct cmdq_read_context {
-	u8 opcode;
-	#define CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT 0x85UL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le32 type_xid;
-	#define CMDQ_READ_CONTEXT_XID_MASK 0xffffffUL
-	#define CMDQ_READ_CONTEXT_XID_SFT 0
-	#define CMDQ_READ_CONTEXT_TYPE_MASK 0xff000000UL
-	#define CMDQ_READ_CONTEXT_TYPE_SFT 24
-	#define CMDQ_READ_CONTEXT_TYPE_QPC (0x0UL << 24)
-	#define CMDQ_READ_CONTEXT_TYPE_CQ (0x1UL << 24)
-	#define CMDQ_READ_CONTEXT_TYPE_MRW (0x2UL << 24)
-	#define CMDQ_READ_CONTEXT_TYPE_SRQ (0x3UL << 24)
-	__le32 unused_0;
-};
-
-/* Map TC to COS. Can only be issued from a PF (24 bytes) */
-struct cmdq_map_tc_to_cos {
-	u8 opcode;
-	#define CMDQ_MAP_TC_TO_COS_OPCODE_MAP_TC_TO_COS 0x8aUL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-	__le16 cos0;
-	#define CMDQ_MAP_TC_TO_COS_COS0_NO_CHANGE 0xffffUL
-	__le16 cos1;
-	#define CMDQ_MAP_TC_TO_COS_COS1_DISABLE 0x8000UL
-	#define CMDQ_MAP_TC_TO_COS_COS1_NO_CHANGE 0xffffUL
-	__le32 unused_0;
-};
-
-/* Query version command (16 bytes) */
-struct cmdq_query_version {
-	u8 opcode;
-	#define CMDQ_QUERY_VERSION_OPCODE_QUERY_VERSION 0x8bUL
-	u8 cmd_size;
-	__le16 flags;
-	__le16 cookie;
-	u8 resp_size;
-	u8 reserved8;
-	__le64 resp_addr;
-};
-
-/* Command-Response Event Queue (CREQ) Structures */
-/* Base CREQ Record (16 bytes) */
-struct creq_base {
-	u8 type;
-	#define CREQ_BASE_TYPE_MASK 0x3fUL
-	#define CREQ_BASE_TYPE_SFT 0
-	#define CREQ_BASE_TYPE_QP_EVENT 0x38UL
-	#define CREQ_BASE_TYPE_FUNC_EVENT 0x3aUL
-	#define CREQ_BASE_RESERVED2_MASK 0xc0UL
-	#define CREQ_BASE_RESERVED2_SFT 6
-	u8 reserved56[7];
-	u8 v;
-	#define CREQ_BASE_V 0x1UL
-	#define CREQ_BASE_RESERVED7_MASK 0xfeUL
-	#define CREQ_BASE_RESERVED7_SFT 1
-	u8 event;
-	__le16 reserved48[3];
-};
-
-/* RoCE Function Async Event Notification (16 bytes) */
-struct creq_func_event {
-	u8 type;
-	#define CREQ_FUNC_EVENT_TYPE_MASK 0x3fUL
-	#define CREQ_FUNC_EVENT_TYPE_SFT 0
-	#define CREQ_FUNC_EVENT_TYPE_FUNC_EVENT 0x3aUL
-	#define CREQ_FUNC_EVENT_RESERVED2_MASK 0xc0UL
-	#define CREQ_FUNC_EVENT_RESERVED2_SFT 6
-	u8 reserved56[7];
-	u8 v;
-	#define CREQ_FUNC_EVENT_V 0x1UL
-	#define CREQ_FUNC_EVENT_RESERVED7_MASK 0xfeUL
-	#define CREQ_FUNC_EVENT_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR 0x1UL
-	#define CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR 0x2UL
-	#define CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR 0x3UL
-	#define CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR 0x4UL
-	#define CREQ_FUNC_EVENT_EVENT_CQ_ERROR 0x5UL
-	#define CREQ_FUNC_EVENT_EVENT_TQM_ERROR 0x6UL
-	#define CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR 0x7UL
-	#define CREQ_FUNC_EVENT_EVENT_CFCS_ERROR 0x8UL
-	#define CREQ_FUNC_EVENT_EVENT_CFCC_ERROR 0x9UL
-	#define CREQ_FUNC_EVENT_EVENT_CFCM_ERROR 0xaUL
-	#define CREQ_FUNC_EVENT_EVENT_TIM_ERROR 0xbUL
-	#define CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST 0x80UL
-	#define CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED 0x81UL
-	__le16 reserved48[3];
-};
-
-/* RoCE Slowpath Command Completion (16 bytes) */
-struct creq_qp_event {
-	u8 type;
-	#define CREQ_QP_EVENT_TYPE_MASK 0x3fUL
-	#define CREQ_QP_EVENT_TYPE_SFT 0
-	#define CREQ_QP_EVENT_TYPE_QP_EVENT 0x38UL
-	#define CREQ_QP_EVENT_RESERVED2_MASK 0xc0UL
-	#define CREQ_QP_EVENT_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 reserved32;
-	u8 v;
-	#define CREQ_QP_EVENT_V 0x1UL
-	#define CREQ_QP_EVENT_RESERVED7_MASK 0xfeUL
-	#define CREQ_QP_EVENT_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_QP_EVENT_EVENT_CREATE_QP 0x1UL
-	#define CREQ_QP_EVENT_EVENT_DESTROY_QP 0x2UL
-	#define CREQ_QP_EVENT_EVENT_MODIFY_QP 0x3UL
-	#define CREQ_QP_EVENT_EVENT_QUERY_QP 0x4UL
-	#define CREQ_QP_EVENT_EVENT_CREATE_SRQ 0x5UL
-	#define CREQ_QP_EVENT_EVENT_DESTROY_SRQ 0x6UL
-	#define CREQ_QP_EVENT_EVENT_QUERY_SRQ 0x8UL
-	#define CREQ_QP_EVENT_EVENT_CREATE_CQ 0x9UL
-	#define CREQ_QP_EVENT_EVENT_DESTROY_CQ 0xaUL
-	#define CREQ_QP_EVENT_EVENT_RESIZE_CQ 0xcUL
-	#define CREQ_QP_EVENT_EVENT_ALLOCATE_MRW 0xdUL
-	#define CREQ_QP_EVENT_EVENT_DEALLOCATE_KEY 0xeUL
-	#define CREQ_QP_EVENT_EVENT_REGISTER_MR 0xfUL
-	#define CREQ_QP_EVENT_EVENT_DEREGISTER_MR 0x10UL
-	#define CREQ_QP_EVENT_EVENT_ADD_GID 0x11UL
-	#define CREQ_QP_EVENT_EVENT_DELETE_GID 0x12UL
-	#define CREQ_QP_EVENT_EVENT_MODIFY_GID 0x17UL
-	#define CREQ_QP_EVENT_EVENT_QUERY_GID 0x18UL
-	#define CREQ_QP_EVENT_EVENT_CREATE_QP1 0x13UL
-	#define CREQ_QP_EVENT_EVENT_DESTROY_QP1 0x14UL
-	#define CREQ_QP_EVENT_EVENT_CREATE_AH 0x15UL
-	#define CREQ_QP_EVENT_EVENT_DESTROY_AH 0x16UL
-	#define CREQ_QP_EVENT_EVENT_INITIALIZE_FW 0x80UL
-	#define CREQ_QP_EVENT_EVENT_DEINITIALIZE_FW 0x81UL
-	#define CREQ_QP_EVENT_EVENT_STOP_FUNC 0x82UL
-	#define CREQ_QP_EVENT_EVENT_QUERY_FUNC 0x83UL
-	#define CREQ_QP_EVENT_EVENT_SET_FUNC_RESOURCES 0x84UL
-	#define CREQ_QP_EVENT_EVENT_MAP_TC_TO_COS 0x8aUL
-	#define CREQ_QP_EVENT_EVENT_QUERY_VERSION 0x8bUL
-	#define CREQ_QP_EVENT_EVENT_MODIFY_CC 0x8cUL
-	#define CREQ_QP_EVENT_EVENT_QUERY_CC 0x8dUL
-	#define CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION 0xc0UL
-	__le16 reserved48[3];
-};
-
-/* Create QP command response (16 bytes) */
-struct creq_create_qp_resp {
-	u8 type;
-	#define CREQ_CREATE_QP_RESP_TYPE_MASK 0x3fUL
-	#define CREQ_CREATE_QP_RESP_TYPE_SFT 0
-	#define CREQ_CREATE_QP_RESP_TYPE_QP_EVENT 0x38UL
-	#define CREQ_CREATE_QP_RESP_RESERVED2_MASK 0xc0UL
-	#define CREQ_CREATE_QP_RESP_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 xid;
-	u8 v;
-	#define CREQ_CREATE_QP_RESP_V 0x1UL
-	#define CREQ_CREATE_QP_RESP_RESERVED7_MASK 0xfeUL
-	#define CREQ_CREATE_QP_RESP_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_CREATE_QP_RESP_EVENT_CREATE_QP 0x1UL
-	__le16 reserved48[3];
-};
-
-/* Destroy QP command response (16 bytes) */
-struct creq_destroy_qp_resp {
-	u8 type;
-	#define CREQ_DESTROY_QP_RESP_TYPE_MASK 0x3fUL
-	#define CREQ_DESTROY_QP_RESP_TYPE_SFT 0
-	#define CREQ_DESTROY_QP_RESP_TYPE_QP_EVENT 0x38UL
-	#define CREQ_DESTROY_QP_RESP_RESERVED2_MASK 0xc0UL
-	#define CREQ_DESTROY_QP_RESP_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 xid;
-	u8 v;
-	#define CREQ_DESTROY_QP_RESP_V 0x1UL
-	#define CREQ_DESTROY_QP_RESP_RESERVED7_MASK 0xfeUL
-	#define CREQ_DESTROY_QP_RESP_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_DESTROY_QP_RESP_EVENT_DESTROY_QP 0x2UL
-	__le16 reserved48[3];
-};
-
-/* Modify QP command response (16 bytes) */
-struct creq_modify_qp_resp {
-	u8 type;
-	#define CREQ_MODIFY_QP_RESP_TYPE_MASK 0x3fUL
-	#define CREQ_MODIFY_QP_RESP_TYPE_SFT 0
-	#define CREQ_MODIFY_QP_RESP_TYPE_QP_EVENT 0x38UL
-	#define CREQ_MODIFY_QP_RESP_RESERVED2_MASK 0xc0UL
-	#define CREQ_MODIFY_QP_RESP_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 xid;
-	u8 v;
-	#define CREQ_MODIFY_QP_RESP_V 0x1UL
-	#define CREQ_MODIFY_QP_RESP_RESERVED7_MASK 0xfeUL
-	#define CREQ_MODIFY_QP_RESP_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_MODIFY_QP_RESP_EVENT_MODIFY_QP 0x3UL
-	__le16 reserved48[3];
-};
-
-/* cmdq_query_roce_stats (size:128b/16B) */
-struct cmdq_query_roce_stats {
+/* creq_query_qp_extend_resp (size:128b/16B) */
+struct creq_query_qp_extend_resp {
+	u8 type;
+	#define CREQ_QUERY_QP_EXTEND_RESP_TYPE_MASK 0x3fUL
+	#define CREQ_QUERY_QP_EXTEND_RESP_TYPE_SFT 0
+	#define CREQ_QUERY_QP_EXTEND_RESP_TYPE_QP_EVENT 0x38UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_TYPE_LAST CREQ_QUERY_QP_EXTEND_RESP_TYPE_QP_EVENT
+	u8 status;
+	__le16 cookie;
+	__le32 size;
+	u8 v;
+	#define CREQ_QUERY_QP_EXTEND_RESP_V 0x1UL
+	u8 event;
+	#define CREQ_QUERY_QP_EXTEND_RESP_EVENT_QUERY_QP_EXTEND 0x91UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_EVENT_LAST CREQ_QUERY_QP_EXTEND_RESP_EVENT_QUERY_QP_EXTEND
+	__le16 reserved16;
+	__le32 current_index;
+};
+
+/* creq_query_qp_extend_resp_sb (size:384b/48B) */
+struct creq_query_qp_extend_resp_sb {
 	u8 opcode;
-	#define CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS 0x8eUL
-	#define CMDQ_QUERY_ROCE_STATS_OPCODE_LAST \
-		CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_OPCODE_QUERY_QP_EXTEND 0x91UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_OPCODE_LAST \
+		CREQ_QUERY_QP_EXTEND_RESP_SB_OPCODE_QUERY_QP_EXTEND
+	u8 status;
+	__le16 cookie;
+	__le16 flags;
+	u8 resp_size;
+	u8 reserved8;
+	__le32 xid;
+	u8 state;
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_MASK 0xfUL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_SFT 0
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_RESET 0x0UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_INIT 0x1UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_RTR 0x2UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_RTS 0x3UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_SQD 0x4UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_SQE 0x5UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_ERR 0x6UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_ERR
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_UNUSED4_MASK 0xf0UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_UNUSED4_SFT 4
+	u8 reserved_8;
+	__le16 port_id;
+	__le32 qkey;
+	__le16 sgid_index;
+	u8 network_type;
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV1 0x0UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV2_IPV4 0x2UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV2_IPV6 0x3UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_LAST \
+		CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV2_IPV6
+	u8 unused_0;
+	__le32 dgid[4];
+	__le32 dest_qp_id;
+	u8 stat_collection_id;
+	u8 reservred_8;
+	__le16 reserved_16;
+};
+
+/* creq_query_qp_extend_resp_sb_tlv (size:512b/64B) */
+struct creq_query_qp_extend_resp_sb_tlv {
+	__le16 cmd_discr;
+	u8 reserved_8b;
+	u8 tlv_flags;
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST \
+		CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES
+	__le16 tlv_type;
+	__le16 length;
+	u8 total_size;
+	u8 reserved56[7];
+	u8 opcode;
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_OPCODE_QUERY_QP_EXTEND 0x91UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_OPCODE_LAST \
+		CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_OPCODE_QUERY_QP_EXTEND
+	u8 status;
+	__le16 cookie;
+	__le16 flags;
+	u8 resp_size;
+	u8 reserved8;
+	__le32 xid;
+	u8 state;
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_MASK 0xfUL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_SFT 0
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_RESET 0x0UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_INIT 0x1UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_RTR 0x2UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_RTS 0x3UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_SQD 0x4UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_SQE 0x5UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_ERR 0x6UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_LAST \
+		CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_ERR
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_UNUSED4_MASK 0xf0UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_UNUSED4_SFT 4
+	u8 reserved_8;
+	__le16 port_id;
+	__le32 qkey;
+	__le16 sgid_index;
+	u8 network_type;
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV1 0x0UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV2_IPV4 0x2UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV2_IPV6 0x3UL
+	#define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_LAST \
+		CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV2_IPV6
+	u8 unused_0;
+	__le32 dgid[4];
+	__le32 dest_qp_id;
+	u8 stat_collection_id;
+	u8 reservred_8;
+	__le16 reserved_16;
+};
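The TLV variant above prefixes the same side-buffer payload with a generic TLV header, so a consumer would inspect tlv_flags and length before touching the payload. A minimal sketch follows; it is illustrative only, not part of this patch, and "tlv" is a hypothetical pointer to the struct just defined:

	bool more = tlv->tlv_flags &
		    CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE;
	bool required = tlv->tlv_flags &
			CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED;
	u16 payload_len = le16_to_cpu(tlv->length);
	/* Only walk on to another TLV when the MORE flag says one follows. */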
+
+/* cmdq_create_srq (size:384b/48B) */
+struct cmdq_create_srq {
+	u8 opcode;
+	#define CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ 0x5UL
+	#define CMDQ_CREATE_SRQ_OPCODE_LAST CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ
 	u8 cmd_size;
 	__le16 flags;
 	__le16 cookie;
 	u8 resp_size;
 	u8 reserved8;
 	__le64 resp_addr;
+	__le64 srq_handle;
+	__le16 pg_size_lvl;
+	#define CMDQ_CREATE_SRQ_LVL_MASK 0x3UL
+	#define CMDQ_CREATE_SRQ_LVL_SFT 0
+	#define CMDQ_CREATE_SRQ_LVL_LVL_0 0x0UL
+	#define CMDQ_CREATE_SRQ_LVL_LVL_1 0x1UL
+	#define CMDQ_CREATE_SRQ_LVL_LVL_2 0x2UL
+	#define CMDQ_CREATE_SRQ_LVL_LAST CMDQ_CREATE_SRQ_LVL_LVL_2
+	#define CMDQ_CREATE_SRQ_PG_SIZE_MASK 0x1cUL
+	#define CMDQ_CREATE_SRQ_PG_SIZE_SFT 2
+	#define CMDQ_CREATE_SRQ_PG_SIZE_PG_4K (0x0UL << 2)
+	#define CMDQ_CREATE_SRQ_PG_SIZE_PG_8K (0x1UL << 2)
+	#define CMDQ_CREATE_SRQ_PG_SIZE_PG_64K (0x2UL << 2)
+	#define CMDQ_CREATE_SRQ_PG_SIZE_PG_2M (0x3UL << 2)
+	#define CMDQ_CREATE_SRQ_PG_SIZE_PG_8M (0x4UL << 2)
+	#define CMDQ_CREATE_SRQ_PG_SIZE_PG_1G (0x5UL << 2)
+	#define CMDQ_CREATE_SRQ_PG_SIZE_LAST CMDQ_CREATE_SRQ_PG_SIZE_PG_1G
+	#define CMDQ_CREATE_SRQ_UNUSED11_MASK 0xffe0UL
+	#define CMDQ_CREATE_SRQ_UNUSED11_SFT 5
+	__le16 eventq_id;
+	#define CMDQ_CREATE_SRQ_EVENTQ_ID_MASK 0xfffUL
+	#define CMDQ_CREATE_SRQ_EVENTQ_ID_SFT 0
+	#define CMDQ_CREATE_SRQ_UNUSED4_MASK 0xf000UL
+	#define CMDQ_CREATE_SRQ_UNUSED4_SFT 12
+	__le16 srq_size;
+	__le16 srq_fwo;
+	__le32 dpi;
+	__le32 pd_id;
+	__le64 pbl;
 };

-/* Query QP command response (16 bytes) */
-struct creq_query_qp_resp {
-	u8 type;
-	#define CREQ_QUERY_QP_RESP_TYPE_MASK 0x3fUL
-	#define CREQ_QUERY_QP_RESP_TYPE_SFT 0
-	#define CREQ_QUERY_QP_RESP_TYPE_QP_EVENT 0x38UL
-	#define CREQ_QUERY_QP_RESP_RESERVED2_MASK 0xc0UL
-	#define CREQ_QUERY_QP_RESP_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 size;
-	u8 v;
-	#define CREQ_QUERY_QP_RESP_V 0x1UL
-	#define CREQ_QUERY_QP_RESP_RESERVED7_MASK 0xfeUL
-	#define CREQ_QUERY_QP_RESP_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_QUERY_QP_RESP_EVENT_QUERY_QP 0x4UL
-	__le16 reserved48[3];
-};
-
-/* Query QP command response side buffer structure (104 bytes) */
-struct creq_query_qp_resp_sb {
-	u8 opcode;
-	#define CREQ_QUERY_QP_RESP_SB_OPCODE_QUERY_QP 0x4UL
-	u8 status;
-	__le16 cookie;
-	__le16 flags;
-	u8 resp_size;
-	u8 reserved8;
-	__le32 xid;
-	u8 en_sqd_async_notify_state;
-	#define CREQ_QUERY_QP_RESP_SB_STATE_MASK 0xfUL
-	#define CREQ_QUERY_QP_RESP_SB_STATE_SFT 0
-	#define CREQ_QUERY_QP_RESP_SB_STATE_RESET 0x0UL
-	#define CREQ_QUERY_QP_RESP_SB_STATE_INIT 0x1UL
-	#define CREQ_QUERY_QP_RESP_SB_STATE_RTR 0x2UL
-	#define CREQ_QUERY_QP_RESP_SB_STATE_RTS 0x3UL
-	#define CREQ_QUERY_QP_RESP_SB_STATE_SQD 0x4UL
-	#define CREQ_QUERY_QP_RESP_SB_STATE_SQE 0x5UL
-	#define CREQ_QUERY_QP_RESP_SB_STATE_ERR 0x6UL
-	#define CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY 0x10UL
-	u8 access;
-	#define CREQ_QUERY_QP_RESP_SB_ACCESS_LOCAL_WRITE 0x1UL
-	#define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_WRITE 0x2UL
-	#define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_READ 0x4UL
-	#define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC 0x8UL
-	__le16 pkey;
-	__le32 qkey;
-	__le32 reserved32;
-	__le32 dgid[4];
-	__le32 flow_label;
-	__le16 sgid_index;
-	u8 hop_limit;
-	u8 traffic_class;
-	__le16 dest_mac[3];
-	__le16 path_mtu_dest_vlan_id;
-	#define CREQ_QUERY_QP_RESP_SB_DEST_VLAN_ID_MASK 0xfffUL
-	#define CREQ_QUERY_QP_RESP_SB_DEST_VLAN_ID_SFT 0
-	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK 0xf000UL
-	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT 12
-	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_256 (0x0UL << 12)
-	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_512 (0x1UL << 12)
-	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_1024 (0x2UL << 12)
-	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_2048 (0x3UL << 12)
-	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_4096 (0x4UL << 12)
-	#define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_8192 (0x5UL << 12)
-	u8 timeout;
-	u8 retry_cnt;
-	u8 rnr_retry;
-	u8 min_rnr_timer;
-	__le32 rq_psn;
-	__le32 sq_psn;
-	u8 max_rd_atomic;
-	u8 max_dest_rd_atomic;
-	u8 tos_dscp_tos_ecn;
-	#define CREQ_QUERY_QP_RESP_SB_TOS_ECN_MASK 0x3UL
-	#define CREQ_QUERY_QP_RESP_SB_TOS_ECN_SFT 0
-	#define CREQ_QUERY_QP_RESP_SB_TOS_DSCP_MASK 0xfcUL
-	#define CREQ_QUERY_QP_RESP_SB_TOS_DSCP_SFT 2
-	u8 enable_cc;
-	#define CREQ_QUERY_QP_RESP_SB_ENABLE_CC 0x1UL
-	#define CREQ_QUERY_QP_RESP_SB_RESERVED7_MASK 0xfeUL
-	#define CREQ_QUERY_QP_RESP_SB_RESERVED7_SFT 1
-	__le32 sq_size;
-	__le32 rq_size;
-	__le16 sq_sge;
-	__le16 rq_sge;
-	__le32 max_inline_data;
-	__le32 dest_qp_id;
-	__le32 unused_1;
-	__le16 src_mac[3];
-	__le16 vlan_pcp_vlan_dei_vlan_id;
-	#define CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK 0xfffUL
-	#define CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT 0
-	#define CREQ_QUERY_QP_RESP_SB_VLAN_DEI 0x1000UL
-	#define CREQ_QUERY_QP_RESP_SB_VLAN_PCP_MASK 0xe000UL
-	#define CREQ_QUERY_QP_RESP_SB_VLAN_PCP_SFT 13
-};
-
-/* Create SRQ command response (16 bytes) */
+/* creq_create_srq_resp (size:128b/16B) */
 struct creq_create_srq_resp {
-	u8 type;
-	#define CREQ_CREATE_SRQ_RESP_TYPE_MASK 0x3fUL
-	#define CREQ_CREATE_SRQ_RESP_TYPE_SFT 0
-	#define CREQ_CREATE_SRQ_RESP_TYPE_QP_EVENT 0x38UL
-	#define CREQ_CREATE_SRQ_RESP_RESERVED2_MASK 0xc0UL
-	#define CREQ_CREATE_SRQ_RESP_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 xid;
-	u8 v;
-	#define CREQ_CREATE_SRQ_RESP_V 0x1UL
-	#define CREQ_CREATE_SRQ_RESP_RESERVED7_MASK 0xfeUL
-	#define CREQ_CREATE_SRQ_RESP_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_CREATE_SRQ_RESP_EVENT_CREATE_SRQ 0x5UL
-	__le16 reserved48[3];
-};
-
-/* Destroy SRQ command response (16 bytes) */
+	u8 type;
+	#define CREQ_CREATE_SRQ_RESP_TYPE_MASK 0x3fUL
+	#define CREQ_CREATE_SRQ_RESP_TYPE_SFT 0
+	#define CREQ_CREATE_SRQ_RESP_TYPE_QP_EVENT 0x38UL
+	#define CREQ_CREATE_SRQ_RESP_TYPE_LAST CREQ_CREATE_SRQ_RESP_TYPE_QP_EVENT
+	u8 status;
+	__le16 cookie;
+	__le32 xid;
+	u8 v;
+	#define CREQ_CREATE_SRQ_RESP_V 0x1UL
+	u8 event;
+	#define CREQ_CREATE_SRQ_RESP_EVENT_CREATE_SRQ 0x5UL
+	#define CREQ_CREATE_SRQ_RESP_EVENT_LAST CREQ_CREATE_SRQ_RESP_EVENT_CREATE_SRQ
+	u8 reserved48[6];
+};
+
+/* cmdq_destroy_srq (size:192b/24B) */
+struct cmdq_destroy_srq {
+	u8 opcode;
+	#define CMDQ_DESTROY_SRQ_OPCODE_DESTROY_SRQ 0x6UL
+	#define CMDQ_DESTROY_SRQ_OPCODE_LAST CMDQ_DESTROY_SRQ_OPCODE_DESTROY_SRQ
+	u8 cmd_size;
+	__le16 flags;
+	__le16 cookie;
+	u8 resp_size;
+	u8 reserved8;
+	__le64 resp_addr;
+	__le32 srq_cid;
+	__le32 unused_0;
+};
+
+/* creq_destroy_srq_resp (size:128b/16B) */
 struct creq_destroy_srq_resp {
-	u8 type;
-	#define CREQ_DESTROY_SRQ_RESP_TYPE_MASK 0x3fUL
-	#define CREQ_DESTROY_SRQ_RESP_TYPE_SFT 0
-	#define CREQ_DESTROY_SRQ_RESP_TYPE_QP_EVENT 0x38UL
-	#define CREQ_DESTROY_SRQ_RESP_RESERVED2_MASK 0xc0UL
-	#define CREQ_DESTROY_SRQ_RESP_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 xid;
-	u8 v;
-	#define CREQ_DESTROY_SRQ_RESP_V 0x1UL
-	#define CREQ_DESTROY_SRQ_RESP_RESERVED7_MASK 0xfeUL
-	#define CREQ_DESTROY_SRQ_RESP_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_DESTROY_SRQ_RESP_EVENT_DESTROY_SRQ 0x6UL
-	__le16 enable_for_arm[3];
-	#define CREQ_DESTROY_SRQ_RESP_ENABLE_FOR_ARM_MASK 0x30000UL
-	#define CREQ_DESTROY_SRQ_RESP_ENABLE_FOR_ARM_SFT 16
-	#define CREQ_DESTROY_SRQ_RESP_RESERVED46_MASK 0xfffc0000UL
-	#define CREQ_DESTROY_SRQ_RESP_RESERVED46_SFT 18
-};
-
-/* Query SRQ command response (16 bytes) */
-struct creq_query_srq_resp {
-	u8 type;
-	#define CREQ_QUERY_SRQ_RESP_TYPE_MASK 0x3fUL
-	#define CREQ_QUERY_SRQ_RESP_TYPE_SFT 0
-	#define CREQ_QUERY_SRQ_RESP_TYPE_QP_EVENT 0x38UL
-	#define CREQ_QUERY_SRQ_RESP_RESERVED2_MASK 0xc0UL
-	#define CREQ_QUERY_SRQ_RESP_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 size;
-	u8 v;
-	#define CREQ_QUERY_SRQ_RESP_V 0x1UL
-	#define CREQ_QUERY_SRQ_RESP_RESERVED7_MASK 0xfeUL
-	#define CREQ_QUERY_SRQ_RESP_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_QUERY_SRQ_RESP_EVENT_QUERY_SRQ 0x8UL
-	__le16 reserved48[3];
-};
-
-/* Query SRQ command response side buffer structure (24 bytes) */
-struct creq_query_srq_resp_sb {
-	u8 opcode;
-	#define CREQ_QUERY_SRQ_RESP_SB_OPCODE_QUERY_SRQ 0x8UL
-	u8 status;
-	__le16 cookie;
-	__le16 flags;
-	u8 resp_size;
-	u8 reserved8;
-	__le32 xid;
-	__le16 srq_limit;
-	__le16 reserved16;
-	__le32 data[4];
-};
-
-/* Create CQ command Response (16 bytes) */
-struct creq_create_cq_resp {
-	u8 type;
-	#define CREQ_CREATE_CQ_RESP_TYPE_MASK 0x3fUL
-	#define CREQ_CREATE_CQ_RESP_TYPE_SFT 0
-	#define CREQ_CREATE_CQ_RESP_TYPE_QP_EVENT 0x38UL
-	#define CREQ_CREATE_CQ_RESP_RESERVED2_MASK 0xc0UL
-	#define CREQ_CREATE_CQ_RESP_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 xid;
-	u8 v;
-	#define CREQ_CREATE_CQ_RESP_V 0x1UL
-	#define CREQ_CREATE_CQ_RESP_RESERVED7_MASK 0xfeUL
-	#define CREQ_CREATE_CQ_RESP_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_CREATE_CQ_RESP_EVENT_CREATE_CQ 0x9UL
-	__le16 reserved48[3];
-};
-
-/* Destroy CQ command response (16 bytes) */
-struct creq_destroy_cq_resp {
-	u8 type;
-	#define CREQ_DESTROY_CQ_RESP_TYPE_MASK 0x3fUL
-	#define CREQ_DESTROY_CQ_RESP_TYPE_SFT 0
-	#define CREQ_DESTROY_CQ_RESP_TYPE_QP_EVENT 0x38UL
-	#define CREQ_DESTROY_CQ_RESP_RESERVED2_MASK 0xc0UL
-	#define CREQ_DESTROY_CQ_RESP_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 xid;
-	u8 v;
-	#define CREQ_DESTROY_CQ_RESP_V 0x1UL
-	#define CREQ_DESTROY_CQ_RESP_RESERVED7_MASK 0xfeUL
-	#define CREQ_DESTROY_CQ_RESP_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_DESTROY_CQ_RESP_EVENT_DESTROY_CQ 0xaUL
-	__le16 cq_arm_lvl;
-	#define CREQ_DESTROY_CQ_RESP_CQ_ARM_LVL_MASK 0x3UL
-	#define CREQ_DESTROY_CQ_RESP_CQ_ARM_LVL_SFT 0
-	#define CREQ_DESTROY_CQ_RESP_RESERVED14_MASK 0xfffcUL
-	#define CREQ_DESTROY_CQ_RESP_RESERVED14_SFT 2
-	__le16 total_cnq_events;
-	__le16 reserved16;
-};
-
-/* Resize CQ command response (16 bytes) */
-struct creq_resize_cq_resp {
-	u8 type;
-	#define CREQ_RESIZE_CQ_RESP_TYPE_MASK 0x3fUL
-	#define CREQ_RESIZE_CQ_RESP_TYPE_SFT 0
-	#define CREQ_RESIZE_CQ_RESP_TYPE_QP_EVENT 0x38UL
-	#define CREQ_RESIZE_CQ_RESP_RESERVED2_MASK 0xc0UL
-	#define CREQ_RESIZE_CQ_RESP_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 xid;
-	u8 v;
-	#define CREQ_RESIZE_CQ_RESP_V 0x1UL
-	#define CREQ_RESIZE_CQ_RESP_RESERVED7_MASK 0xfeUL
-	#define CREQ_RESIZE_CQ_RESP_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_RESIZE_CQ_RESP_EVENT_RESIZE_CQ 0xcUL
-	__le16 reserved48[3];
-};
-
-/* Allocate MRW command response (16 bytes) */
-struct creq_allocate_mrw_resp {
-	u8 type;
-	#define CREQ_ALLOCATE_MRW_RESP_TYPE_MASK 0x3fUL
-	#define CREQ_ALLOCATE_MRW_RESP_TYPE_SFT 0
-	#define CREQ_ALLOCATE_MRW_RESP_TYPE_QP_EVENT 0x38UL
-	#define CREQ_ALLOCATE_MRW_RESP_RESERVED2_MASK 0xc0UL
-	#define CREQ_ALLOCATE_MRW_RESP_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 xid;
-	u8 v;
-	#define CREQ_ALLOCATE_MRW_RESP_V 0x1UL
-	#define CREQ_ALLOCATE_MRW_RESP_RESERVED7_MASK 0xfeUL
-	#define CREQ_ALLOCATE_MRW_RESP_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_ALLOCATE_MRW_RESP_EVENT_ALLOCATE_MRW 0xdUL
-	__le16 reserved48[3];
-};
-
-/* De-allocate key command response (16 bytes) */
-struct creq_deallocate_key_resp {
-	u8 type;
-	#define CREQ_DEALLOCATE_KEY_RESP_TYPE_MASK 0x3fUL
-	#define CREQ_DEALLOCATE_KEY_RESP_TYPE_SFT 0
-	#define CREQ_DEALLOCATE_KEY_RESP_TYPE_QP_EVENT 0x38UL
-	#define CREQ_DEALLOCATE_KEY_RESP_RESERVED2_MASK 0xc0UL
-	#define CREQ_DEALLOCATE_KEY_RESP_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 xid;
-	u8 v;
-	#define CREQ_DEALLOCATE_KEY_RESP_V 0x1UL
-	#define CREQ_DEALLOCATE_KEY_RESP_RESERVED7_MASK 0xfeUL
-	#define CREQ_DEALLOCATE_KEY_RESP_RESERVED7_SFT 1
-	u8 event;
-	#define CREQ_DEALLOCATE_KEY_RESP_EVENT_DEALLOCATE_KEY 0xeUL
-	__le16 reserved16;
-	__le32 bound_window_info;
-};
-
-/* Register MR command response (16 bytes) */
-struct creq_register_mr_resp {
-	u8 type;
-	#define CREQ_REGISTER_MR_RESP_TYPE_MASK 0x3fUL
-	#define CREQ_REGISTER_MR_RESP_TYPE_SFT 0
-	#define CREQ_REGISTER_MR_RESP_TYPE_QP_EVENT 0x38UL
-	#define CREQ_REGISTER_MR_RESP_RESERVED2_MASK 0xc0UL
-	#define CREQ_REGISTER_MR_RESP_RESERVED2_SFT 6
-	u8 status;
-	__le16 cookie;
-	__le32 xid;
-	u8 v;
-	#define CREQ_REGISTER_MR_RESP_V 0x1UL
-	#define CREQ_REGISTER_MR_RESP_RESERVED7_MASK 0xfeUL
-	#define
CREQ_REGISTER_MR_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_REGISTER_MR_RESP_EVENT_REGISTER_MR 0xfUL - __le16 reserved48[3]; -}; - -/* Deregister MR command response (16 bytes) */ -struct creq_deregister_mr_resp { - u8 type; - #define CREQ_DEREGISTER_MR_RESP_TYPE_MASK 0x3fUL - #define CREQ_DEREGISTER_MR_RESP_TYPE_SFT 0 - #define CREQ_DEREGISTER_MR_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_DEREGISTER_MR_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_DEREGISTER_MR_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 xid; - u8 v; - #define CREQ_DEREGISTER_MR_RESP_V 0x1UL - #define CREQ_DEREGISTER_MR_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_DEREGISTER_MR_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_DEREGISTER_MR_RESP_EVENT_DEREGISTER_MR 0x10UL - __le16 reserved16; - __le32 bound_windows; -}; - -/* Add GID command response (16 bytes) */ -struct creq_add_gid_resp { - u8 type; - #define CREQ_ADD_GID_RESP_TYPE_MASK 0x3fUL - #define CREQ_ADD_GID_RESP_TYPE_SFT 0 - #define CREQ_ADD_GID_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_ADD_GID_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_ADD_GID_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 xid; - u8 v; - #define CREQ_ADD_GID_RESP_V 0x1UL - #define CREQ_ADD_GID_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_ADD_GID_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_ADD_GID_RESP_EVENT_ADD_GID 0x11UL - __le16 reserved48[3]; -}; - -/* Delete GID command response (16 bytes) */ -struct creq_delete_gid_resp { - u8 type; - #define CREQ_DELETE_GID_RESP_TYPE_MASK 0x3fUL - #define CREQ_DELETE_GID_RESP_TYPE_SFT 0 - #define CREQ_DELETE_GID_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_DELETE_GID_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_DELETE_GID_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 xid; - u8 v; - #define CREQ_DELETE_GID_RESP_V 0x1UL - #define CREQ_DELETE_GID_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_DELETE_GID_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_DELETE_GID_RESP_EVENT_DELETE_GID 0x12UL - __le16 reserved48[3]; -}; - -/* Modify GID command response (16 bytes) */ -struct creq_modify_gid_resp { - u8 type; - #define CREQ_MODIFY_GID_RESP_TYPE_MASK 0x3fUL - #define CREQ_MODIFY_GID_RESP_TYPE_SFT 0 - #define CREQ_MODIFY_GID_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_MODIFY_GID_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_MODIFY_GID_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 xid; - u8 v; - #define CREQ_MODIFY_GID_RESP_V 0x1UL - #define CREQ_MODIFY_GID_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_MODIFY_GID_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_MODIFY_GID_RESP_EVENT_ADD_GID 0x11UL - __le16 reserved48[3]; -}; - -/* Query GID command response (16 bytes) */ -struct creq_query_gid_resp { - u8 type; - #define CREQ_QUERY_GID_RESP_TYPE_MASK 0x3fUL - #define CREQ_QUERY_GID_RESP_TYPE_SFT 0 - #define CREQ_QUERY_GID_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_QUERY_GID_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_QUERY_GID_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 size; - u8 v; - #define CREQ_QUERY_GID_RESP_V 0x1UL - #define CREQ_QUERY_GID_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_QUERY_GID_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_QUERY_GID_RESP_EVENT_QUERY_GID 0x18UL - __le16 reserved48[3]; -}; - -/* Query GID command response side buffer structure (40 bytes) */ -struct creq_query_gid_resp_sb { - u8 opcode; - #define CREQ_QUERY_GID_RESP_SB_OPCODE_QUERY_GID 0x18UL - u8 status; - __le16 cookie; - __le16 flags; - u8 resp_size; - u8 reserved8; - __le32 gid[4]; - __le16 src_mac[3]; - __le16 vlan; - #define 
CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_ID_MASK 0xfffUL - #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_ID_SFT 0 - #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_MASK 0x7000UL - #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_SFT 12 - #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_88A8 (0x0UL << 12) - #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_8100 (0x1UL << 12) - #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9100 (0x2UL << 12) - #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9200 (0x3UL << 12) - #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9300 (0x4UL << 12) - #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG1 (0x5UL << 12) - #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG2 (0x6UL << 12) - #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG3 (0x7UL << 12) - #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_LAST \ - CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG3 - #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN 0x8000UL - __le16 ipid; - __le16 gid_index; - __le32 unused_0; -}; - -/* Create QP1 command response (16 bytes) */ -struct creq_create_qp1_resp { - u8 type; - #define CREQ_CREATE_QP1_RESP_TYPE_MASK 0x3fUL - #define CREQ_CREATE_QP1_RESP_TYPE_SFT 0 - #define CREQ_CREATE_QP1_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_CREATE_QP1_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_CREATE_QP1_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 xid; - u8 v; - #define CREQ_CREATE_QP1_RESP_V 0x1UL - #define CREQ_CREATE_QP1_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_CREATE_QP1_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_CREATE_QP1_RESP_EVENT_CREATE_QP1 0x13UL - __le16 reserved48[3]; -}; - -/* Destroy QP1 command response (16 bytes) */ -struct creq_destroy_qp1_resp { - u8 type; - #define CREQ_DESTROY_QP1_RESP_TYPE_MASK 0x3fUL - #define CREQ_DESTROY_QP1_RESP_TYPE_SFT 0 - #define CREQ_DESTROY_QP1_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_DESTROY_QP1_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_DESTROY_QP1_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 xid; - u8 v; - #define CREQ_DESTROY_QP1_RESP_V 0x1UL - #define CREQ_DESTROY_QP1_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_DESTROY_QP1_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_DESTROY_QP1_RESP_EVENT_DESTROY_QP1 0x14UL - __le16 reserved48[3]; -}; - -/* Create AH command response (16 bytes) */ -struct creq_create_ah_resp { - u8 type; - #define CREQ_CREATE_AH_RESP_TYPE_MASK 0x3fUL - #define CREQ_CREATE_AH_RESP_TYPE_SFT 0 - #define CREQ_CREATE_AH_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_CREATE_AH_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_CREATE_AH_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 xid; - u8 v; - #define CREQ_CREATE_AH_RESP_V 0x1UL - #define CREQ_CREATE_AH_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_CREATE_AH_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_CREATE_AH_RESP_EVENT_CREATE_AH 0x15UL - __le16 reserved48[3]; -}; - -/* Destroy AH command response (16 bytes) */ -struct creq_destroy_ah_resp { - u8 type; - #define CREQ_DESTROY_AH_RESP_TYPE_MASK 0x3fUL - #define CREQ_DESTROY_AH_RESP_TYPE_SFT 0 - #define CREQ_DESTROY_AH_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_DESTROY_AH_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_DESTROY_AH_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 xid; - u8 v; - #define CREQ_DESTROY_AH_RESP_V 0x1UL - #define CREQ_DESTROY_AH_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_DESTROY_AH_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_DESTROY_AH_RESP_EVENT_DESTROY_AH 0x16UL - __le16 reserved48[3]; -}; - -/* Initialize Firmware command response (16 bytes) */ -struct creq_initialize_fw_resp { - u8 type; - #define 
CREQ_INITIALIZE_FW_RESP_TYPE_MASK 0x3fUL - #define CREQ_INITIALIZE_FW_RESP_TYPE_SFT 0 - #define CREQ_INITIALIZE_FW_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_INITIALIZE_FW_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_INITIALIZE_FW_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 reserved32; - u8 v; - #define CREQ_INITIALIZE_FW_RESP_V 0x1UL - #define CREQ_INITIALIZE_FW_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_INITIALIZE_FW_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_INITIALIZE_FW_RESP_EVENT_INITIALIZE_FW 0x80UL - __le16 reserved48[3]; -}; - -/* De-initialize Firmware command response (16 bytes) */ -struct creq_deinitialize_fw_resp { - u8 type; - #define CREQ_DEINITIALIZE_FW_RESP_TYPE_MASK 0x3fUL - #define CREQ_DEINITIALIZE_FW_RESP_TYPE_SFT 0 - #define CREQ_DEINITIALIZE_FW_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_DEINITIALIZE_FW_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_DEINITIALIZE_FW_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 reserved32; - u8 v; - #define CREQ_DEINITIALIZE_FW_RESP_V 0x1UL - #define CREQ_DEINITIALIZE_FW_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_DEINITIALIZE_FW_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_DEINITIALIZE_FW_RESP_EVENT_DEINITIALIZE_FW 0x81UL - __le16 reserved48[3]; -}; - -/* Stop function command response (16 bytes) */ -struct creq_stop_func_resp { - u8 type; - #define CREQ_STOP_FUNC_RESP_TYPE_MASK 0x3fUL - #define CREQ_STOP_FUNC_RESP_TYPE_SFT 0 - #define CREQ_STOP_FUNC_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_STOP_FUNC_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_STOP_FUNC_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 reserved32; - u8 v; - #define CREQ_STOP_FUNC_RESP_V 0x1UL - #define CREQ_STOP_FUNC_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_STOP_FUNC_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_STOP_FUNC_RESP_EVENT_STOP_FUNC 0x82UL - __le16 reserved48[3]; -}; - -/* Query function command response (16 bytes) */ -struct creq_query_func_resp { - u8 type; - #define CREQ_QUERY_FUNC_RESP_TYPE_MASK 0x3fUL - #define CREQ_QUERY_FUNC_RESP_TYPE_SFT 0 - #define CREQ_QUERY_FUNC_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_QUERY_FUNC_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_QUERY_FUNC_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 size; - u8 v; - #define CREQ_QUERY_FUNC_RESP_V 0x1UL - #define CREQ_QUERY_FUNC_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_QUERY_FUNC_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_QUERY_FUNC_RESP_EVENT_QUERY_FUNC 0x83UL - __le16 reserved48[3]; -}; - -/* Query function command response side buffer structure (88 bytes) */ -struct creq_query_func_resp_sb { - u8 opcode; - #define CREQ_QUERY_FUNC_RESP_SB_OPCODE_QUERY_FUNC 0x83UL - u8 status; - __le16 cookie; - __le16 flags; - u8 resp_size; - u8 reserved8; - __le64 max_mr_size; - __le32 max_qp; - __le16 max_qp_wr; - __le16 dev_cap_flags; - #define CREQ_QUERY_FUNC_RESP_SB_DEV_CAP_FLAGS_RESIZE_QP 0x1UL - #define CREQ_QUERY_FUNC_RESP_SB_EXT_STATS 0x10UL - __le32 max_cq; - __le32 max_cqe; - __le32 max_pd; - u8 max_sge; - u8 max_srq_sge; - u8 max_qp_rd_atom; - u8 max_qp_init_rd_atom; - __le32 max_mr; - __le32 max_mw; - __le32 max_raw_eth_qp; - __le32 max_ah; - __le32 max_fmr; - __le32 max_srq_wr; - __le32 max_pkeys; - __le32 max_inline_data; - u8 max_map_per_fmr; - u8 l2_db_space_size; - __le16 max_srq; - __le32 max_gid; - __le32 tqm_alloc_reqs[12]; - __le32 max_dpi; - __le32 reserved_32; -}; - -/* Set resources command response (16 bytes) */ -struct creq_set_func_resources_resp { - u8 type; - #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_MASK 0x3fUL - 
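/*
 * Illustrative sketch, not driver code: every *_MASK/*_SFT pair in this
 * header follows one convention -- extract a field with
 * (value & MASK) >> SFT, compose one with (field << SFT) & MASK.
 * A minimal example using the TYPE macros of the response struct at
 * hand; the helper name is hypothetical.
 */
static inline u8 creq_event_type(u8 type_byte)
{
	return (type_byte & CREQ_SET_FUNC_RESOURCES_RESP_TYPE_MASK) >>
	       CREQ_SET_FUNC_RESOURCES_RESP_TYPE_SFT;
}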
#define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_SFT 0 - #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_SET_FUNC_RESOURCES_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_SET_FUNC_RESOURCES_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 reserved32; - u8 v; - #define CREQ_SET_FUNC_RESOURCES_RESP_V 0x1UL - #define CREQ_SET_FUNC_RESOURCES_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_SET_FUNC_RESOURCES_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_SET_FUNC_RESOURCES_RESP_EVENT_SET_FUNC_RESOURCES 0x84UL - __le16 reserved48[3]; + u8 type; + #define CREQ_DESTROY_SRQ_RESP_TYPE_MASK 0x3fUL + #define CREQ_DESTROY_SRQ_RESP_TYPE_SFT 0 + #define CREQ_DESTROY_SRQ_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DESTROY_SRQ_RESP_TYPE_LAST CREQ_DESTROY_SRQ_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DESTROY_SRQ_RESP_V 0x1UL + u8 event; + #define CREQ_DESTROY_SRQ_RESP_EVENT_DESTROY_SRQ 0x6UL + #define CREQ_DESTROY_SRQ_RESP_EVENT_LAST CREQ_DESTROY_SRQ_RESP_EVENT_DESTROY_SRQ + __le16 enable_for_arm[3]; + #define CREQ_DESTROY_SRQ_RESP_UNUSED0_MASK 0xffffUL + #define CREQ_DESTROY_SRQ_RESP_UNUSED0_SFT 0 + #define CREQ_DESTROY_SRQ_RESP_ENABLE_FOR_ARM_MASK 0x30000UL + #define CREQ_DESTROY_SRQ_RESP_ENABLE_FOR_ARM_SFT 16 }; -/* Map TC to COS response (16 bytes) */ -struct creq_map_tc_to_cos_resp { - u8 type; - #define CREQ_MAP_TC_TO_COS_RESP_TYPE_MASK 0x3fUL - #define CREQ_MAP_TC_TO_COS_RESP_TYPE_SFT 0 - #define CREQ_MAP_TC_TO_COS_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_MAP_TC_TO_COS_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_MAP_TC_TO_COS_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 reserved32; - u8 v; - #define CREQ_MAP_TC_TO_COS_RESP_V 0x1UL - #define CREQ_MAP_TC_TO_COS_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_MAP_TC_TO_COS_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_MAP_TC_TO_COS_RESP_EVENT_MAP_TC_TO_COS 0x8aUL - __le16 reserved48[3]; -}; - -/* Query version response (16 bytes) */ -struct creq_query_version_resp { - u8 type; - #define CREQ_QUERY_VERSION_RESP_TYPE_MASK 0x3fUL - #define CREQ_QUERY_VERSION_RESP_TYPE_SFT 0 - #define CREQ_QUERY_VERSION_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_QUERY_VERSION_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_QUERY_VERSION_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - u8 fw_maj; - u8 fw_minor; - u8 fw_bld; - u8 fw_rsvd; - u8 v; - #define CREQ_QUERY_VERSION_RESP_V 0x1UL - #define CREQ_QUERY_VERSION_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_QUERY_VERSION_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_QUERY_VERSION_RESP_EVENT_QUERY_VERSION 0x8bUL - __le16 reserved16; - u8 intf_maj; - u8 intf_minor; - u8 intf_bld; - u8 intf_rsvd; -}; - -/* Modify congestion control command response (16 bytes) */ -struct creq_modify_cc_resp { - u8 type; - #define CREQ_MODIFY_CC_RESP_TYPE_MASK 0x3fUL - #define CREQ_MODIFY_CC_RESP_TYPE_SFT 0 - #define CREQ_MODIFY_CC_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_MODIFY_CC_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_MODIFY_CC_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 reserved32; - u8 v; - #define CREQ_MODIFY_CC_RESP_V 0x1UL - #define CREQ_MODIFY_CC_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_MODIFY_CC_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_MODIFY_CC_RESP_EVENT_MODIFY_CC 0x8cUL - __le16 reserved48[3]; -}; - -/* Query congestion control command response (16 bytes) */ -struct creq_query_cc_resp { - u8 type; - #define CREQ_QUERY_CC_RESP_TYPE_MASK 0x3fUL - #define CREQ_QUERY_CC_RESP_TYPE_SFT 0 - #define CREQ_QUERY_CC_RESP_TYPE_QP_EVENT 0x38UL - #define 
CREQ_QUERY_CC_RESP_RESERVED2_MASK 0xc0UL - #define CREQ_QUERY_CC_RESP_RESERVED2_SFT 6 - u8 status; - __le16 cookie; - __le32 size; - u8 v; - #define CREQ_QUERY_CC_RESP_V 0x1UL - #define CREQ_QUERY_CC_RESP_RESERVED7_MASK 0xfeUL - #define CREQ_QUERY_CC_RESP_RESERVED7_SFT 1 - u8 event; - #define CREQ_QUERY_CC_RESP_EVENT_QUERY_CC 0x8dUL - __le16 reserved48[3]; -}; - -/* Query congestion control command response side buffer structure (32 bytes) */ -struct creq_query_cc_resp_sb { - u8 opcode; - #define CREQ_QUERY_CC_RESP_SB_OPCODE_QUERY_CC 0x8dUL - u8 status; - __le16 cookie; - __le16 flags; - u8 resp_size; - u8 reserved8; - u8 enable_cc; - #define CREQ_QUERY_CC_RESP_SB_ENABLE_CC 0x1UL - u8 g; - #define CREQ_QUERY_CC_RESP_SB_G_MASK 0x7UL - #define CREQ_QUERY_CC_RESP_SB_G_SFT 0 - u8 num_phases_per_state; - __le16 init_cr; - u8 unused_2; - __le16 unused_3; - u8 unused_4; - __le16 init_tr; - u8 tos_dscp_tos_ecn; - #define CREQ_QUERY_CC_RESP_SB_TOS_ECN_MASK 0x3UL - #define CREQ_QUERY_CC_RESP_SB_TOS_ECN_SFT 0 - #define CREQ_QUERY_CC_RESP_SB_TOS_DSCP_MASK 0xfcUL - #define CREQ_QUERY_CC_RESP_SB_TOS_DSCP_SFT 2 - __le64 reserved64; - __le64 reserved64_1; +/* cmdq_query_srq (size:192b/24B) */ +struct cmdq_query_srq { + u8 opcode; + #define CMDQ_QUERY_SRQ_OPCODE_QUERY_SRQ 0x8UL + #define CMDQ_QUERY_SRQ_OPCODE_LAST CMDQ_QUERY_SRQ_OPCODE_QUERY_SRQ + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 srq_cid; + __le32 unused_0; }; -/* creq_query_roce_stats_resp (size:128b/16B) */ -struct creq_query_roce_stats_resp { +/* creq_query_srq_resp (size:128b/16B) */ +struct creq_query_srq_resp { u8 type; - #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_MASK 0x3fUL - #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_SFT 0 - #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_LAST \ - CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT + #define CREQ_QUERY_SRQ_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_SRQ_RESP_TYPE_SFT 0 + #define CREQ_QUERY_SRQ_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_SRQ_RESP_TYPE_LAST CREQ_QUERY_SRQ_RESP_TYPE_QP_EVENT u8 status; __le16 cookie; __le32 size; u8 v; - #define CREQ_QUERY_ROCE_STATS_RESP_V 0x1UL + #define CREQ_QUERY_SRQ_RESP_V 0x1UL u8 event; - #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS 0x8eUL - #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_LAST \ - CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS + #define CREQ_QUERY_SRQ_RESP_EVENT_QUERY_SRQ 0x8UL + #define CREQ_QUERY_SRQ_RESP_EVENT_LAST CREQ_QUERY_SRQ_RESP_EVENT_QUERY_SRQ u8 reserved48[6]; }; -/* creq_query_roce_stats_resp_sb (size:2624b/328B) */ -struct creq_query_roce_stats_resp_sb { +/* creq_query_srq_resp_sb (size:256b/32B) */ +struct creq_query_srq_resp_sb { u8 opcode; - #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS 0x8eUL - #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_LAST \ - CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS + #define CREQ_QUERY_SRQ_RESP_SB_OPCODE_QUERY_SRQ 0x8UL + #define CREQ_QUERY_SRQ_RESP_SB_OPCODE_LAST CREQ_QUERY_SRQ_RESP_SB_OPCODE_QUERY_SRQ u8 status; __le16 cookie; __le16 flags; u8 resp_size; - u8 rsvd; - __le32 num_counters; - __le32 rsvd1; - __le64 to_retransmits; - __le64 seq_err_naks_rcvd; - __le64 max_retry_exceeded; - __le64 rnr_naks_rcvd; - __le64 missing_resp; - __le64 unrecoverable_err; - __le64 bad_resp_err; - __le64 local_qp_op_err; - __le64 local_protection_err; - __le64 mem_mgmt_op_err; - __le64 remote_invalid_req_err; - __le64 remote_access_err; - __le64 remote_op_err; - __le64 
dup_req; - __le64 res_exceed_max; - __le64 res_length_mismatch; - __le64 res_exceeds_wqe; - __le64 res_opcode_err; - __le64 res_rx_invalid_rkey; - __le64 res_rx_domain_err; - __le64 res_rx_no_perm; - __le64 res_rx_range_err; - __le64 res_tx_invalid_rkey; - __le64 res_tx_domain_err; - __le64 res_tx_no_perm; - __le64 res_tx_range_err; - __le64 res_irrq_oflow; - __le64 res_unsup_opcode; - __le64 res_unaligned_atomic; - __le64 res_rem_inv_err; - __le64 res_mem_error; - __le64 res_srq_err; - __le64 res_cmp_err; - __le64 res_invalid_dup_rkey; - __le64 res_wqe_format_err; - __le64 res_cq_load_err; - __le64 res_srq_load_err; - __le64 res_tx_pci_err; - __le64 res_rx_pci_err; - __le64 res_oos_drop_count; - __le64 active_qp_count_p0; - __le64 active_qp_count_p1; - __le64 active_qp_count_p2; - __le64 active_qp_count_p3; + u8 reserved8; + __le32 xid; + __le16 srq_limit; + __le16 reserved16; + __le32 data[4]; }; -/* cmdq_query_roce_stats_ext (size:192b/24B) */ -struct cmdq_query_roce_stats_ext { - u8 opcode; - #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS 0x92UL - #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_LAST \ - CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS - u8 cmd_size; - __le16 flags; - #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_COLLECTION_ID 0x1UL - #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID 0x2UL - __le16 cookie; - u8 resp_size; - u8 collection_id; - __le64 resp_addr; - __le32 function_id; - #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_MASK 0xffUL - #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_SFT 0 - #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_MASK 0xffff00UL - #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT 8 - #define CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID 0x1000000UL - __le32 reserved32; +/* cmdq_create_cq (size:384b/48B) */ +struct cmdq_create_cq { + u8 opcode; + #define CMDQ_CREATE_CQ_OPCODE_CREATE_CQ 0x9UL + #define CMDQ_CREATE_CQ_OPCODE_LAST CMDQ_CREATE_CQ_OPCODE_CREATE_CQ + u8 cmd_size; + __le16 flags; + #define CMDQ_CREATE_CQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x1UL + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 cq_handle; + __le32 pg_size_lvl; + #define CMDQ_CREATE_CQ_LVL_MASK 0x3UL + #define CMDQ_CREATE_CQ_LVL_SFT 0 + #define CMDQ_CREATE_CQ_LVL_LVL_0 0x0UL + #define CMDQ_CREATE_CQ_LVL_LVL_1 0x1UL + #define CMDQ_CREATE_CQ_LVL_LVL_2 0x2UL + #define CMDQ_CREATE_CQ_LVL_LAST CMDQ_CREATE_CQ_LVL_LVL_2 + #define CMDQ_CREATE_CQ_PG_SIZE_MASK 0x1cUL + #define CMDQ_CREATE_CQ_PG_SIZE_SFT 2 + #define CMDQ_CREATE_CQ_PG_SIZE_PG_4K (0x0UL << 2) + #define CMDQ_CREATE_CQ_PG_SIZE_PG_8K (0x1UL << 2) + #define CMDQ_CREATE_CQ_PG_SIZE_PG_64K (0x2UL << 2) + #define CMDQ_CREATE_CQ_PG_SIZE_PG_2M (0x3UL << 2) + #define CMDQ_CREATE_CQ_PG_SIZE_PG_8M (0x4UL << 2) + #define CMDQ_CREATE_CQ_PG_SIZE_PG_1G (0x5UL << 2) + #define CMDQ_CREATE_CQ_PG_SIZE_LAST CMDQ_CREATE_CQ_PG_SIZE_PG_1G + #define CMDQ_CREATE_CQ_UNUSED27_MASK 0xffffffe0UL + #define CMDQ_CREATE_CQ_UNUSED27_SFT 5 + __le32 cq_fco_cnq_id; + #define CMDQ_CREATE_CQ_CNQ_ID_MASK 0xfffUL + #define CMDQ_CREATE_CQ_CNQ_ID_SFT 0 + #define CMDQ_CREATE_CQ_CQ_FCO_MASK 0xfffff000UL + #define CMDQ_CREATE_CQ_CQ_FCO_SFT 12 + __le32 dpi; + __le32 cq_size; + __le64 pbl; }; -/* creq_query_roce_stats_ext_resp (size:128b/16B) */ -struct creq_query_roce_stats_ext_resp { - u8 type; - #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_MASK 0x3fUL - #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_SFT 0 - #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT 0x38UL - #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_LAST \ - CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT 
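/*
 * Illustrative sketch, not part of this patch: how a consumer might
 * validate one of these 16-byte CREQ completions -- check the valid
 * bit, the event type and the firmware status, then match the echoed
 * cookie against the request.  Function and parameter names are
 * hypothetical; only the macros defined in this header are assumed.
 */
static bool creq_ext_stats_resp_ok(const struct creq_query_roce_stats_ext_resp *resp,
				   u16 sent_cookie)
{
	if (!(resp->v & CREQ_QUERY_ROCE_STATS_EXT_RESP_V))
		return false;	/* firmware has not written this slot yet */
	if ((resp->type & CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_MASK) !=
	    CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT)
		return false;	/* not a QP-event completion */
	if (resp->status)
		return false;	/* non-zero status: command failed */
	return le16_to_cpu(resp->cookie) == sent_cookie;
}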
- u8 status; - __le16 cookie; - __le32 size; - u8 v; - #define CREQ_QUERY_ROCE_STATS_EXT_RESP_V 0x1UL - u8 event; +/* creq_create_cq_resp (size:128b/16B) */ +struct creq_create_cq_resp { + u8 type; + #define CREQ_CREATE_CQ_RESP_TYPE_MASK 0x3fUL + #define CREQ_CREATE_CQ_RESP_TYPE_SFT 0 + #define CREQ_CREATE_CQ_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_CREATE_CQ_RESP_TYPE_LAST CREQ_CREATE_CQ_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_CREATE_CQ_RESP_V 0x1UL + u8 event; + #define CREQ_CREATE_CQ_RESP_EVENT_CREATE_CQ 0x9UL + #define CREQ_CREATE_CQ_RESP_EVENT_LAST CREQ_CREATE_CQ_RESP_EVENT_CREATE_CQ + u8 reserved48[6]; +}; + +/* cmdq_destroy_cq (size:192b/24B) */ +struct cmdq_destroy_cq { + u8 opcode; + #define CMDQ_DESTROY_CQ_OPCODE_DESTROY_CQ 0xaUL + #define CMDQ_DESTROY_CQ_OPCODE_LAST CMDQ_DESTROY_CQ_OPCODE_DESTROY_CQ + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 cq_cid; + __le32 unused_0; +}; + +/* creq_destroy_cq_resp (size:128b/16B) */ +struct creq_destroy_cq_resp { + u8 type; + #define CREQ_DESTROY_CQ_RESP_TYPE_MASK 0x3fUL + #define CREQ_DESTROY_CQ_RESP_TYPE_SFT 0 + #define CREQ_DESTROY_CQ_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DESTROY_CQ_RESP_TYPE_LAST CREQ_DESTROY_CQ_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DESTROY_CQ_RESP_V 0x1UL + u8 event; + #define CREQ_DESTROY_CQ_RESP_EVENT_DESTROY_CQ 0xaUL + #define CREQ_DESTROY_CQ_RESP_EVENT_LAST CREQ_DESTROY_CQ_RESP_EVENT_DESTROY_CQ + __le16 cq_arm_lvl; + #define CREQ_DESTROY_CQ_RESP_CQ_ARM_LVL_MASK 0x3UL + #define CREQ_DESTROY_CQ_RESP_CQ_ARM_LVL_SFT 0 + __le16 total_cnq_events; + __le16 reserved16; +}; + +/* cmdq_resize_cq (size:320b/40B) */ +struct cmdq_resize_cq { + u8 opcode; + #define CMDQ_RESIZE_CQ_OPCODE_RESIZE_CQ 0xcUL + #define CMDQ_RESIZE_CQ_OPCODE_LAST CMDQ_RESIZE_CQ_OPCODE_RESIZE_CQ + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 cq_cid; + __le32 new_cq_size_pg_size_lvl; + #define CMDQ_RESIZE_CQ_LVL_MASK 0x3UL + #define CMDQ_RESIZE_CQ_LVL_SFT 0 + #define CMDQ_RESIZE_CQ_LVL_LVL_0 0x0UL + #define CMDQ_RESIZE_CQ_LVL_LVL_1 0x1UL + #define CMDQ_RESIZE_CQ_LVL_LVL_2 0x2UL + #define CMDQ_RESIZE_CQ_LVL_LAST CMDQ_RESIZE_CQ_LVL_LVL_2 + #define CMDQ_RESIZE_CQ_PG_SIZE_MASK 0x1cUL + #define CMDQ_RESIZE_CQ_PG_SIZE_SFT 2 + #define CMDQ_RESIZE_CQ_PG_SIZE_PG_4K (0x0UL << 2) + #define CMDQ_RESIZE_CQ_PG_SIZE_PG_8K (0x1UL << 2) + #define CMDQ_RESIZE_CQ_PG_SIZE_PG_64K (0x2UL << 2) + #define CMDQ_RESIZE_CQ_PG_SIZE_PG_2M (0x3UL << 2) + #define CMDQ_RESIZE_CQ_PG_SIZE_PG_8M (0x4UL << 2) + #define CMDQ_RESIZE_CQ_PG_SIZE_PG_1G (0x5UL << 2) + #define CMDQ_RESIZE_CQ_PG_SIZE_LAST CMDQ_RESIZE_CQ_PG_SIZE_PG_1G + #define CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK 0x1fffffe0UL + #define CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT 5 + __le64 new_pbl; + __le32 new_cq_fco; + __le32 unused_0; +}; + +/* creq_resize_cq_resp (size:128b/16B) */ +struct creq_resize_cq_resp { + u8 type; + #define CREQ_RESIZE_CQ_RESP_TYPE_MASK 0x3fUL + #define CREQ_RESIZE_CQ_RESP_TYPE_SFT 0 + #define CREQ_RESIZE_CQ_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_RESIZE_CQ_RESP_TYPE_LAST CREQ_RESIZE_CQ_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_RESIZE_CQ_RESP_V 0x1UL + u8 event; + #define CREQ_RESIZE_CQ_RESP_EVENT_RESIZE_CQ 0xcUL + #define CREQ_RESIZE_CQ_RESP_EVENT_LAST CREQ_RESIZE_CQ_RESP_EVENT_RESIZE_CQ + u8 reserved48[6]; +}; + +/* cmdq_allocate_mrw (size:256b/32B) */ 
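/*
 * Illustrative sketch, not the driver's actual helper: filling the
 * allocate-MRW command defined just below for a type-1 memory window.
 * Byte order uses the usual cpu_to_le*() helpers; cmd_size scaling and
 * the queueing of the command live outside this header and are not
 * shown.  The function name and parameters are hypothetical.
 */
static void fill_allocate_mw1(struct cmdq_allocate_mrw *req, u32 pd_id,
			      u64 consumer_handle)
{
	memset(req, 0, sizeof(*req));	/* assumes <linux/string.h> */
	req->opcode = CMDQ_ALLOCATE_MRW_OPCODE_ALLOCATE_MRW;
	req->mrw_handle = cpu_to_le64(consumer_handle);
	req->mrw_flags = (CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 <<
			  CMDQ_ALLOCATE_MRW_MRW_FLAGS_SFT) &
			 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MASK;
	req->pd_id = cpu_to_le32(pd_id);
}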
+struct cmdq_allocate_mrw { + u8 opcode; + #define CMDQ_ALLOCATE_MRW_OPCODE_ALLOCATE_MRW 0xdUL + #define CMDQ_ALLOCATE_MRW_OPCODE_LAST CMDQ_ALLOCATE_MRW_OPCODE_ALLOCATE_MRW + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 mrw_handle; + u8 mrw_flags; + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MASK 0xfUL + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_SFT 0 + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR 0x0UL + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR 0x1UL + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 0x2UL + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A 0x3UL + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B 0x4UL + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_LAST CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B + #define CMDQ_ALLOCATE_MRW_UNUSED4_MASK 0xf0UL + #define CMDQ_ALLOCATE_MRW_UNUSED4_SFT 4 + u8 access; + #define CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY 0x20UL + __le16 unused16; + __le32 pd_id; +}; + +/* creq_allocate_mrw_resp (size:128b/16B) */ +struct creq_allocate_mrw_resp { + u8 type; + #define CREQ_ALLOCATE_MRW_RESP_TYPE_MASK 0x3fUL + #define CREQ_ALLOCATE_MRW_RESP_TYPE_SFT 0 + #define CREQ_ALLOCATE_MRW_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_ALLOCATE_MRW_RESP_TYPE_LAST CREQ_ALLOCATE_MRW_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_ALLOCATE_MRW_RESP_V 0x1UL + u8 event; + #define CREQ_ALLOCATE_MRW_RESP_EVENT_ALLOCATE_MRW 0xdUL + #define CREQ_ALLOCATE_MRW_RESP_EVENT_LAST CREQ_ALLOCATE_MRW_RESP_EVENT_ALLOCATE_MRW + u8 reserved48[6]; +}; + +/* cmdq_deallocate_key (size:192b/24B) */ +struct cmdq_deallocate_key { + u8 opcode; + #define CMDQ_DEALLOCATE_KEY_OPCODE_DEALLOCATE_KEY 0xeUL + #define CMDQ_DEALLOCATE_KEY_OPCODE_LAST CMDQ_DEALLOCATE_KEY_OPCODE_DEALLOCATE_KEY + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + u8 mrw_flags; + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MASK 0xfUL + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_SFT 0 + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MR 0x0UL + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_PMR 0x1UL + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE1 0x2UL + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2A 0x3UL + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2B 0x4UL + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_LAST CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2B + #define CMDQ_DEALLOCATE_KEY_UNUSED4_MASK 0xf0UL + #define CMDQ_DEALLOCATE_KEY_UNUSED4_SFT 4 + u8 unused24[3]; + __le32 key; +}; + +/* creq_deallocate_key_resp (size:128b/16B) */ +struct creq_deallocate_key_resp { + u8 type; + #define CREQ_DEALLOCATE_KEY_RESP_TYPE_MASK 0x3fUL + #define CREQ_DEALLOCATE_KEY_RESP_TYPE_SFT 0 + #define CREQ_DEALLOCATE_KEY_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DEALLOCATE_KEY_RESP_TYPE_LAST CREQ_DEALLOCATE_KEY_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DEALLOCATE_KEY_RESP_V 0x1UL + u8 event; + #define CREQ_DEALLOCATE_KEY_RESP_EVENT_DEALLOCATE_KEY 0xeUL + #define CREQ_DEALLOCATE_KEY_RESP_EVENT_LAST CREQ_DEALLOCATE_KEY_RESP_EVENT_DEALLOCATE_KEY + __le16 reserved16; + __le32 bound_window_info; +}; + +/* cmdq_register_mr (size:384b/48B) */ +struct cmdq_register_mr { + u8 opcode; + #define CMDQ_REGISTER_MR_OPCODE_REGISTER_MR 0xfUL + #define CMDQ_REGISTER_MR_OPCODE_LAST CMDQ_REGISTER_MR_OPCODE_REGISTER_MR + u8 cmd_size; + __le16 flags; + #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR 0x1UL + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + u8 log2_pg_size_lvl; + #define CMDQ_REGISTER_MR_LVL_MASK 0x3UL + #define 
CMDQ_REGISTER_MR_LVL_SFT 0 + #define CMDQ_REGISTER_MR_LVL_LVL_0 0x0UL + #define CMDQ_REGISTER_MR_LVL_LVL_1 0x1UL + #define CMDQ_REGISTER_MR_LVL_LVL_2 0x2UL + #define CMDQ_REGISTER_MR_LVL_LAST CMDQ_REGISTER_MR_LVL_LVL_2 + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK 0x7cUL + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT 2 + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4K (0xcUL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_8K (0xdUL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_64K (0x10UL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_256K (0x12UL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1M (0x14UL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_2M (0x15UL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4M (0x16UL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G (0x1eUL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_LAST CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G + #define CMDQ_REGISTER_MR_UNUSED1 0x80UL + u8 access; + #define CMDQ_REGISTER_MR_ACCESS_LOCAL_WRITE 0x1UL + #define CMDQ_REGISTER_MR_ACCESS_REMOTE_READ 0x2UL + #define CMDQ_REGISTER_MR_ACCESS_REMOTE_WRITE 0x4UL + #define CMDQ_REGISTER_MR_ACCESS_REMOTE_ATOMIC 0x8UL + #define CMDQ_REGISTER_MR_ACCESS_MW_BIND 0x10UL + #define CMDQ_REGISTER_MR_ACCESS_ZERO_BASED 0x20UL + __le16 log2_pbl_pg_size; + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK 0x1fUL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT 0 + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K 0xcUL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K 0xdUL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K 0x10UL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K 0x12UL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M 0x14UL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M 0x15UL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M 0x16UL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G 0x1eUL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_LAST CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G + #define CMDQ_REGISTER_MR_UNUSED11_MASK 0xffe0UL + #define CMDQ_REGISTER_MR_UNUSED11_SFT 5 + __le32 key; + __le64 pbl; + __le64 va; + __le64 mr_size; +}; + +/* creq_register_mr_resp (size:128b/16B) */ +struct creq_register_mr_resp { + u8 type; + #define CREQ_REGISTER_MR_RESP_TYPE_MASK 0x3fUL + #define CREQ_REGISTER_MR_RESP_TYPE_SFT 0 + #define CREQ_REGISTER_MR_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_REGISTER_MR_RESP_TYPE_LAST CREQ_REGISTER_MR_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_REGISTER_MR_RESP_V 0x1UL + u8 event; + #define CREQ_REGISTER_MR_RESP_EVENT_REGISTER_MR 0xfUL + #define CREQ_REGISTER_MR_RESP_EVENT_LAST CREQ_REGISTER_MR_RESP_EVENT_REGISTER_MR + u8 reserved48[6]; +}; + +/* cmdq_deregister_mr (size:192b/24B) */ +struct cmdq_deregister_mr { + u8 opcode; + #define CMDQ_DEREGISTER_MR_OPCODE_DEREGISTER_MR 0x10UL + #define CMDQ_DEREGISTER_MR_OPCODE_LAST CMDQ_DEREGISTER_MR_OPCODE_DEREGISTER_MR + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 lkey; + __le32 unused_0; +}; + +/* creq_deregister_mr_resp (size:128b/16B) */ +struct creq_deregister_mr_resp { + u8 type; + #define CREQ_DEREGISTER_MR_RESP_TYPE_MASK 0x3fUL + #define CREQ_DEREGISTER_MR_RESP_TYPE_SFT 0 + #define CREQ_DEREGISTER_MR_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DEREGISTER_MR_RESP_TYPE_LAST CREQ_DEREGISTER_MR_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DEREGISTER_MR_RESP_V 0x1UL + u8 event; + #define CREQ_DEREGISTER_MR_RESP_EVENT_DEREGISTER_MR 0x10UL + #define 
CREQ_DEREGISTER_MR_RESP_EVENT_LAST CREQ_DEREGISTER_MR_RESP_EVENT_DEREGISTER_MR + __le16 reserved16; + __le32 bound_windows; +}; + +/* cmdq_add_gid (size:384b/48B) */ +struct cmdq_add_gid { + u8 opcode; + #define CMDQ_ADD_GID_OPCODE_ADD_GID 0x11UL + #define CMDQ_ADD_GID_OPCODE_LAST CMDQ_ADD_GID_OPCODE_ADD_GID + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __be32 gid[4]; + __be16 src_mac[3]; + __le16 vlan; + #define CMDQ_ADD_GID_VLAN_VLAN_EN_TPID_VLAN_ID_MASK 0xffffUL + #define CMDQ_ADD_GID_VLAN_VLAN_EN_TPID_VLAN_ID_SFT 0 + #define CMDQ_ADD_GID_VLAN_VLAN_ID_MASK 0xfffUL + #define CMDQ_ADD_GID_VLAN_VLAN_ID_SFT 0 + #define CMDQ_ADD_GID_VLAN_TPID_MASK 0x7000UL + #define CMDQ_ADD_GID_VLAN_TPID_SFT 12 + #define CMDQ_ADD_GID_VLAN_TPID_TPID_88A8 (0x0UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_8100 (0x1UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_9100 (0x2UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_9200 (0x3UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_9300 (0x4UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG1 (0x5UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG2 (0x6UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG3 (0x7UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_LAST CMDQ_ADD_GID_VLAN_TPID_TPID_CFG3 + #define CMDQ_ADD_GID_VLAN_VLAN_EN 0x8000UL + __le16 ipid; + __le16 stats_ctx; + #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID_STATS_CTX_ID_MASK 0xffffUL + #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID_STATS_CTX_ID_SFT 0 + #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_ID_MASK 0x7fffUL + #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_ID_SFT 0 + #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID 0x8000UL + __le32 unused_0; +}; + +/* creq_add_gid_resp (size:128b/16B) */ +struct creq_add_gid_resp { + u8 type; + #define CREQ_ADD_GID_RESP_TYPE_MASK 0x3fUL + #define CREQ_ADD_GID_RESP_TYPE_SFT 0 + #define CREQ_ADD_GID_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_ADD_GID_RESP_TYPE_LAST CREQ_ADD_GID_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_ADD_GID_RESP_V 0x1UL + u8 event; + #define CREQ_ADD_GID_RESP_EVENT_ADD_GID 0x11UL + #define CREQ_ADD_GID_RESP_EVENT_LAST CREQ_ADD_GID_RESP_EVENT_ADD_GID + u8 reserved48[6]; +}; + +/* cmdq_delete_gid (size:192b/24B) */ +struct cmdq_delete_gid { + u8 opcode; + #define CMDQ_DELETE_GID_OPCODE_DELETE_GID 0x12UL + #define CMDQ_DELETE_GID_OPCODE_LAST CMDQ_DELETE_GID_OPCODE_DELETE_GID + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le16 gid_index; + u8 unused_0[6]; +}; + +/* creq_delete_gid_resp (size:128b/16B) */ +struct creq_delete_gid_resp { + u8 type; + #define CREQ_DELETE_GID_RESP_TYPE_MASK 0x3fUL + #define CREQ_DELETE_GID_RESP_TYPE_SFT 0 + #define CREQ_DELETE_GID_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DELETE_GID_RESP_TYPE_LAST CREQ_DELETE_GID_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DELETE_GID_RESP_V 0x1UL + u8 event; + #define CREQ_DELETE_GID_RESP_EVENT_DELETE_GID 0x12UL + #define CREQ_DELETE_GID_RESP_EVENT_LAST CREQ_DELETE_GID_RESP_EVENT_DELETE_GID + u8 reserved48[6]; +}; + +/* cmdq_modify_gid (size:384b/48B) */ +struct cmdq_modify_gid { + u8 opcode; + #define CMDQ_MODIFY_GID_OPCODE_MODIFY_GID 0x17UL + #define CMDQ_MODIFY_GID_OPCODE_LAST CMDQ_MODIFY_GID_OPCODE_MODIFY_GID + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __be32 gid[4]; + __be16 src_mac[3]; + __le16 vlan; + #define CMDQ_MODIFY_GID_VLAN_VLAN_ID_MASK 0xfffUL + 
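/*
 * Illustrative sketch, not part of this patch: composing the vlan word
 * shared by the add/modify-GID commands -- a 12-bit VLAN ID, a TPID
 * selector in bits 14:12, and an enable bit 15.  Values track the
 * CMDQ_ADD_GID_VLAN_* macros above; the helper itself is hypothetical.
 */
static __le16 gid_vlan_word(u16 vlan_id)
{
	u16 v;

	v  = (vlan_id << CMDQ_ADD_GID_VLAN_VLAN_ID_SFT) &
	     CMDQ_ADD_GID_VLAN_VLAN_ID_MASK;
	v |= CMDQ_ADD_GID_VLAN_TPID_TPID_8100;	/* standard 802.1Q TPID */
	v |= CMDQ_ADD_GID_VLAN_VLAN_EN;		/* GID is on a tagged net */
	return cpu_to_le16(v);
}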
#define CMDQ_MODIFY_GID_VLAN_VLAN_ID_SFT 0 + #define CMDQ_MODIFY_GID_VLAN_TPID_MASK 0x7000UL + #define CMDQ_MODIFY_GID_VLAN_TPID_SFT 12 + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_88A8 (0x0UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_8100 (0x1UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9100 (0x2UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9200 (0x3UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9300 (0x4UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG1 (0x5UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG2 (0x6UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG3 (0x7UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_LAST CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG3 + #define CMDQ_MODIFY_GID_VLAN_VLAN_EN 0x8000UL + __le16 ipid; + __le16 gid_index; + __le16 stats_ctx; + #define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_ID_MASK 0x7fffUL + #define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_ID_SFT 0 + #define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_VALID 0x8000UL + __le16 unused_0; +}; + +/* creq_modify_gid_resp (size:128b/16B) */ +struct creq_modify_gid_resp { + u8 type; + #define CREQ_MODIFY_GID_RESP_TYPE_MASK 0x3fUL + #define CREQ_MODIFY_GID_RESP_TYPE_SFT 0 + #define CREQ_MODIFY_GID_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_MODIFY_GID_RESP_TYPE_LAST CREQ_MODIFY_GID_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_MODIFY_GID_RESP_V 0x1UL + u8 event; + #define CREQ_MODIFY_GID_RESP_EVENT_ADD_GID 0x11UL + #define CREQ_MODIFY_GID_RESP_EVENT_LAST CREQ_MODIFY_GID_RESP_EVENT_ADD_GID + u8 reserved48[6]; +}; + +/* cmdq_query_gid (size:192b/24B) */ +struct cmdq_query_gid { + u8 opcode; + #define CMDQ_QUERY_GID_OPCODE_QUERY_GID 0x18UL + #define CMDQ_QUERY_GID_OPCODE_LAST CMDQ_QUERY_GID_OPCODE_QUERY_GID + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le16 gid_index; + u8 unused16[6]; +}; + +/* creq_query_gid_resp (size:128b/16B) */ +struct creq_query_gid_resp { + u8 type; + #define CREQ_QUERY_GID_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_GID_RESP_TYPE_SFT 0 + #define CREQ_QUERY_GID_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_GID_RESP_TYPE_LAST CREQ_QUERY_GID_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_GID_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_GID_RESP_EVENT_QUERY_GID 0x18UL + #define CREQ_QUERY_GID_RESP_EVENT_LAST CREQ_QUERY_GID_RESP_EVENT_QUERY_GID + u8 reserved48[6]; +}; + +/* creq_query_gid_resp_sb (size:320b/40B) */ +struct creq_query_gid_resp_sb { + u8 opcode; + #define CREQ_QUERY_GID_RESP_SB_OPCODE_QUERY_GID 0x18UL + #define CREQ_QUERY_GID_RESP_SB_OPCODE_LAST CREQ_QUERY_GID_RESP_SB_OPCODE_QUERY_GID + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 reserved8; + __le32 gid[4]; + __le16 src_mac[3]; + __le16 vlan; + #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN_TPID_VLAN_ID_MASK 0xffffUL + #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN_TPID_VLAN_ID_SFT 0 + #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_ID_MASK 0xfffUL + #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_ID_SFT 0 + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_MASK 0x7000UL + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_SFT 12 + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_88A8 (0x0UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_8100 (0x1UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9100 (0x2UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9200 (0x3UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9300 (0x4UL << 12) + #define 
CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG1 (0x5UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG2 (0x6UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG3 (0x7UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_LAST CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG3 + #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN 0x8000UL + __le16 ipid; + __le16 gid_index; + __le32 unused_0; +}; + +/* cmdq_create_qp1 (size:640b/80B) */ +struct cmdq_create_qp1 { + u8 opcode; + #define CMDQ_CREATE_QP1_OPCODE_CREATE_QP1 0x13UL + #define CMDQ_CREATE_QP1_OPCODE_LAST CMDQ_CREATE_QP1_OPCODE_CREATE_QP1 + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 qp_handle; + __le32 qp_flags; + #define CMDQ_CREATE_QP1_QP_FLAGS_SRQ_USED 0x1UL + #define CMDQ_CREATE_QP1_QP_FLAGS_FORCE_COMPLETION 0x2UL + #define CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE 0x4UL + #define CMDQ_CREATE_QP1_QP_FLAGS_LAST CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE + u8 type; + #define CMDQ_CREATE_QP1_TYPE_GSI 0x1UL + #define CMDQ_CREATE_QP1_TYPE_LAST CMDQ_CREATE_QP1_TYPE_GSI + u8 sq_pg_size_sq_lvl; + #define CMDQ_CREATE_QP1_SQ_LVL_MASK 0xfUL + #define CMDQ_CREATE_QP1_SQ_LVL_SFT 0 + #define CMDQ_CREATE_QP1_SQ_LVL_LVL_0 0x0UL + #define CMDQ_CREATE_QP1_SQ_LVL_LVL_1 0x1UL + #define CMDQ_CREATE_QP1_SQ_LVL_LVL_2 0x2UL + #define CMDQ_CREATE_QP1_SQ_LVL_LAST CMDQ_CREATE_QP1_SQ_LVL_LVL_2 + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_MASK 0xf0UL + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT 4 + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_LAST CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G + u8 rq_pg_size_rq_lvl; + #define CMDQ_CREATE_QP1_RQ_LVL_MASK 0xfUL + #define CMDQ_CREATE_QP1_RQ_LVL_SFT 0 + #define CMDQ_CREATE_QP1_RQ_LVL_LVL_0 0x0UL + #define CMDQ_CREATE_QP1_RQ_LVL_LVL_1 0x1UL + #define CMDQ_CREATE_QP1_RQ_LVL_LVL_2 0x2UL + #define CMDQ_CREATE_QP1_RQ_LVL_LAST CMDQ_CREATE_QP1_RQ_LVL_LVL_2 + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_MASK 0xf0UL + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT 4 + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_LAST CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G + u8 unused_0; + __le32 dpi; + __le32 sq_size; + __le32 rq_size; + __le16 sq_fwo_sq_sge; + #define CMDQ_CREATE_QP1_SQ_SGE_MASK 0xfUL + #define CMDQ_CREATE_QP1_SQ_SGE_SFT 0 + #define CMDQ_CREATE_QP1_SQ_FWO_MASK 0xfff0UL + #define CMDQ_CREATE_QP1_SQ_FWO_SFT 4 + __le16 rq_fwo_rq_sge; + #define CMDQ_CREATE_QP1_RQ_SGE_MASK 0xfUL + #define CMDQ_CREATE_QP1_RQ_SGE_SFT 0 + #define CMDQ_CREATE_QP1_RQ_FWO_MASK 0xfff0UL + #define CMDQ_CREATE_QP1_RQ_FWO_SFT 4 + __le32 scq_cid; + __le32 rcq_cid; + __le32 srq_cid; + __le32 pd_id; + __le64 sq_pbl; + __le64 rq_pbl; +}; + +/* creq_create_qp1_resp (size:128b/16B) */ +struct creq_create_qp1_resp { + u8 type; + #define CREQ_CREATE_QP1_RESP_TYPE_MASK 0x3fUL + #define CREQ_CREATE_QP1_RESP_TYPE_SFT 0 + #define CREQ_CREATE_QP1_RESP_TYPE_QP_EVENT 0x38UL + #define 
CREQ_CREATE_QP1_RESP_TYPE_LAST CREQ_CREATE_QP1_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_CREATE_QP1_RESP_V 0x1UL + u8 event; + #define CREQ_CREATE_QP1_RESP_EVENT_CREATE_QP1 0x13UL + #define CREQ_CREATE_QP1_RESP_EVENT_LAST CREQ_CREATE_QP1_RESP_EVENT_CREATE_QP1 + u8 reserved48[6]; +}; + +/* cmdq_destroy_qp1 (size:192b/24B) */ +struct cmdq_destroy_qp1 { + u8 opcode; + #define CMDQ_DESTROY_QP1_OPCODE_DESTROY_QP1 0x14UL + #define CMDQ_DESTROY_QP1_OPCODE_LAST CMDQ_DESTROY_QP1_OPCODE_DESTROY_QP1 + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 qp1_cid; + __le32 unused_0; +}; + +/* creq_destroy_qp1_resp (size:128b/16B) */ +struct creq_destroy_qp1_resp { + u8 type; + #define CREQ_DESTROY_QP1_RESP_TYPE_MASK 0x3fUL + #define CREQ_DESTROY_QP1_RESP_TYPE_SFT 0 + #define CREQ_DESTROY_QP1_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DESTROY_QP1_RESP_TYPE_LAST CREQ_DESTROY_QP1_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DESTROY_QP1_RESP_V 0x1UL + u8 event; + #define CREQ_DESTROY_QP1_RESP_EVENT_DESTROY_QP1 0x14UL + #define CREQ_DESTROY_QP1_RESP_EVENT_LAST CREQ_DESTROY_QP1_RESP_EVENT_DESTROY_QP1 + u8 reserved48[6]; +}; + +/* cmdq_create_ah (size:512b/64B) */ +struct cmdq_create_ah { + u8 opcode; + #define CMDQ_CREATE_AH_OPCODE_CREATE_AH 0x15UL + #define CMDQ_CREATE_AH_OPCODE_LAST CMDQ_CREATE_AH_OPCODE_CREATE_AH + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 ah_handle; + __le32 dgid[4]; + u8 type; + #define CMDQ_CREATE_AH_TYPE_V1 0x0UL + #define CMDQ_CREATE_AH_TYPE_V2IPV4 0x2UL + #define CMDQ_CREATE_AH_TYPE_V2IPV6 0x3UL + #define CMDQ_CREATE_AH_TYPE_LAST CMDQ_CREATE_AH_TYPE_V2IPV6 + u8 hop_limit; + __le16 sgid_index; + __le32 dest_vlan_id_flow_label; + #define CMDQ_CREATE_AH_FLOW_LABEL_MASK 0xfffffUL + #define CMDQ_CREATE_AH_FLOW_LABEL_SFT 0 + #define CMDQ_CREATE_AH_DEST_VLAN_ID_MASK 0xfff00000UL + #define CMDQ_CREATE_AH_DEST_VLAN_ID_SFT 20 + __le32 pd_id; + __le32 unused_0; + __le16 dest_mac[3]; + u8 traffic_class; + u8 enable_cc; + #define CMDQ_CREATE_AH_ENABLE_CC 0x1UL +}; + +/* creq_create_ah_resp (size:128b/16B) */ +struct creq_create_ah_resp { + u8 type; + #define CREQ_CREATE_AH_RESP_TYPE_MASK 0x3fUL + #define CREQ_CREATE_AH_RESP_TYPE_SFT 0 + #define CREQ_CREATE_AH_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_CREATE_AH_RESP_TYPE_LAST CREQ_CREATE_AH_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_CREATE_AH_RESP_V 0x1UL + u8 event; + #define CREQ_CREATE_AH_RESP_EVENT_CREATE_AH 0x15UL + #define CREQ_CREATE_AH_RESP_EVENT_LAST CREQ_CREATE_AH_RESP_EVENT_CREATE_AH + u8 reserved48[6]; +}; + +/* cmdq_destroy_ah (size:192b/24B) */ +struct cmdq_destroy_ah { + u8 opcode; + #define CMDQ_DESTROY_AH_OPCODE_DESTROY_AH 0x16UL + #define CMDQ_DESTROY_AH_OPCODE_LAST CMDQ_DESTROY_AH_OPCODE_DESTROY_AH + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 ah_cid; + __le32 unused_0; +}; + +/* creq_destroy_ah_resp (size:128b/16B) */ +struct creq_destroy_ah_resp { + u8 type; + #define CREQ_DESTROY_AH_RESP_TYPE_MASK 0x3fUL + #define CREQ_DESTROY_AH_RESP_TYPE_SFT 0 + #define CREQ_DESTROY_AH_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DESTROY_AH_RESP_TYPE_LAST CREQ_DESTROY_AH_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DESTROY_AH_RESP_V 0x1UL + u8 event; + #define CREQ_DESTROY_AH_RESP_EVENT_DESTROY_AH 
0x16UL + #define CREQ_DESTROY_AH_RESP_EVENT_LAST CREQ_DESTROY_AH_RESP_EVENT_DESTROY_AH + u8 reserved48[6]; +}; + +/* cmdq_query_roce_stats (size:192b/24B) */ +struct cmdq_query_roce_stats { + u8 opcode; + #define CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS 0x8eUL + #define CMDQ_QUERY_ROCE_STATS_OPCODE_LAST CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS + u8 cmd_size; + __le16 flags; + #define CMDQ_QUERY_ROCE_STATS_FLAGS_COLLECTION_ID 0x1UL + #define CMDQ_QUERY_ROCE_STATS_FLAGS_FUNCTION_ID 0x2UL + __le16 cookie; + u8 resp_size; + u8 collection_id; + __le64 resp_addr; + __le32 function_id; + #define CMDQ_QUERY_ROCE_STATS_PF_NUM_MASK 0xffUL + #define CMDQ_QUERY_ROCE_STATS_PF_NUM_SFT 0 + #define CMDQ_QUERY_ROCE_STATS_VF_NUM_MASK 0xffff00UL + #define CMDQ_QUERY_ROCE_STATS_VF_NUM_SFT 8 + #define CMDQ_QUERY_ROCE_STATS_VF_VALID 0x1000000UL + __le32 reserved32; +}; + +/* creq_query_roce_stats_resp (size:128b/16B) */ +struct creq_query_roce_stats_resp { + u8 type; + #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_SFT 0 + #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_LAST CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_ROCE_STATS_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS 0x8eUL + #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_LAST \ + CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS + u8 reserved48[6]; +}; + +/* creq_query_roce_stats_resp_sb (size:2944b/368B) */ +struct creq_query_roce_stats_resp_sb { + u8 opcode; + #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS 0x8eUL + #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_LAST \ + CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 rsvd; + __le32 num_counters; + __le32 rsvd1; + __le64 to_retransmits; + __le64 seq_err_naks_rcvd; + __le64 max_retry_exceeded; + __le64 rnr_naks_rcvd; + __le64 missing_resp; + __le64 unrecoverable_err; + __le64 bad_resp_err; + __le64 local_qp_op_err; + __le64 local_protection_err; + __le64 mem_mgmt_op_err; + __le64 remote_invalid_req_err; + __le64 remote_access_err; + __le64 remote_op_err; + __le64 dup_req; + __le64 res_exceed_max; + __le64 res_length_mismatch; + __le64 res_exceeds_wqe; + __le64 res_opcode_err; + __le64 res_rx_invalid_rkey; + __le64 res_rx_domain_err; + __le64 res_rx_no_perm; + __le64 res_rx_range_err; + __le64 res_tx_invalid_rkey; + __le64 res_tx_domain_err; + __le64 res_tx_no_perm; + __le64 res_tx_range_err; + __le64 res_irrq_oflow; + __le64 res_unsup_opcode; + __le64 res_unaligned_atomic; + __le64 res_rem_inv_err; + __le64 res_mem_error; + __le64 res_srq_err; + __le64 res_cmp_err; + __le64 res_invalid_dup_rkey; + __le64 res_wqe_format_err; + __le64 res_cq_load_err; + __le64 res_srq_load_err; + __le64 res_tx_pci_err; + __le64 res_rx_pci_err; + __le64 res_oos_drop_count; + __le64 active_qp_count_p0; + __le64 active_qp_count_p1; + __le64 active_qp_count_p2; + __le64 active_qp_count_p3; +}; + +/* cmdq_query_roce_stats_ext (size:192b/24B) */ +struct cmdq_query_roce_stats_ext { + u8 opcode; + #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS 0x92UL + #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_LAST \ + CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS + u8 cmd_size; + __le16 flags; + #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_COLLECTION_ID 0x1UL + #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID 0x2UL + __le16 cookie; + u8 
resp_size; + u8 collection_id; + __le64 resp_addr; + __le32 function_id; + #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_MASK 0xffUL + #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_SFT 0 + #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_MASK 0xffff00UL + #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT 8 + #define CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID 0x1000000UL + __le32 reserved32; +}; + +/* creq_query_roce_stats_ext_resp (size:128b/16B) */ +struct creq_query_roce_stats_ext_resp { + u8 type; + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_SFT 0 + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_LAST \ + CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_V 0x1UL + u8 event; #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT 0x92UL #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_LAST \ CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT - u8 reserved48[6]; + u8 reserved48[6]; +}; + +/* creq_query_roce_stats_ext_resp_sb (size:1856b/232B) */ +struct creq_query_roce_stats_ext_resp_sb { + u8 opcode; + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_LAST \ + CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 rsvd; + __le64 tx_atomic_req_pkts; + __le64 tx_read_req_pkts; + __le64 tx_read_res_pkts; + __le64 tx_write_req_pkts; + __le64 tx_send_req_pkts; + __le64 tx_roce_pkts; + __le64 tx_roce_bytes; + __le64 rx_atomic_req_pkts; + __le64 rx_read_req_pkts; + __le64 rx_read_res_pkts; + __le64 rx_write_req_pkts; + __le64 rx_send_req_pkts; + __le64 rx_roce_pkts; + __le64 rx_roce_bytes; + __le64 rx_roce_good_pkts; + __le64 rx_roce_good_bytes; + __le64 rx_out_of_buffer_pkts; + __le64 rx_out_of_sequence_pkts; + __le64 tx_cnp_pkts; + __le64 rx_cnp_pkts; + __le64 rx_ecn_marked_pkts; + __le64 tx_cnp_bytes; + __le64 rx_cnp_bytes; + __le64 seq_err_naks_rcvd; + __le64 rnr_naks_rcvd; + __le64 missing_resp; + __le64 to_retransmit; + __le64 dup_req; +}; + +/* cmdq_query_func (size:128b/16B) */ +struct cmdq_query_func { + u8 opcode; + #define CMDQ_QUERY_FUNC_OPCODE_QUERY_FUNC 0x83UL + #define CMDQ_QUERY_FUNC_OPCODE_LAST CMDQ_QUERY_FUNC_OPCODE_QUERY_FUNC + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; +}; + +/* creq_query_func_resp (size:128b/16B) */ +struct creq_query_func_resp { + u8 type; + #define CREQ_QUERY_FUNC_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_FUNC_RESP_TYPE_SFT 0 + #define CREQ_QUERY_FUNC_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_FUNC_RESP_TYPE_LAST CREQ_QUERY_FUNC_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_FUNC_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_FUNC_RESP_EVENT_QUERY_FUNC 0x83UL + #define CREQ_QUERY_FUNC_RESP_EVENT_LAST CREQ_QUERY_FUNC_RESP_EVENT_QUERY_FUNC + u8 reserved48[6]; +}; + +/* creq_query_func_resp_sb (size:1088b/136B) */ +struct creq_query_func_resp_sb { + u8 opcode; + #define CREQ_QUERY_FUNC_RESP_SB_OPCODE_QUERY_FUNC 0x83UL + #define CREQ_QUERY_FUNC_RESP_SB_OPCODE_LAST CREQ_QUERY_FUNC_RESP_SB_OPCODE_QUERY_FUNC + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 reserved8; + __le64 max_mr_size; + __le32 max_qp; + __le16 max_qp_wr; + __le16 dev_cap_flags; + #define CREQ_QUERY_FUNC_RESP_SB_RESIZE_QP 0x1UL + #define 
+
+/* cmdq_query_func (size:128b/16B) */
+struct cmdq_query_func {
+	u8 opcode;
+	#define CMDQ_QUERY_FUNC_OPCODE_QUERY_FUNC 0x83UL
+	#define CMDQ_QUERY_FUNC_OPCODE_LAST CMDQ_QUERY_FUNC_OPCODE_QUERY_FUNC
+	u8 cmd_size;
+	__le16 flags;
+	__le16 cookie;
+	u8 resp_size;
+	u8 reserved8;
+	__le64 resp_addr;
+};
+
+/* creq_query_func_resp (size:128b/16B) */
+struct creq_query_func_resp {
+	u8 type;
+	#define CREQ_QUERY_FUNC_RESP_TYPE_MASK 0x3fUL
+	#define CREQ_QUERY_FUNC_RESP_TYPE_SFT 0
+	#define CREQ_QUERY_FUNC_RESP_TYPE_QP_EVENT 0x38UL
+	#define CREQ_QUERY_FUNC_RESP_TYPE_LAST CREQ_QUERY_FUNC_RESP_TYPE_QP_EVENT
+	u8 status;
+	__le16 cookie;
+	__le32 size;
+	u8 v;
+	#define CREQ_QUERY_FUNC_RESP_V 0x1UL
+	u8 event;
+	#define CREQ_QUERY_FUNC_RESP_EVENT_QUERY_FUNC 0x83UL
+	#define CREQ_QUERY_FUNC_RESP_EVENT_LAST CREQ_QUERY_FUNC_RESP_EVENT_QUERY_FUNC
+	u8 reserved48[6];
+};
+
+/* creq_query_func_resp_sb (size:1088b/136B) */
+struct creq_query_func_resp_sb {
+	u8 opcode;
+	#define CREQ_QUERY_FUNC_RESP_SB_OPCODE_QUERY_FUNC 0x83UL
+	#define CREQ_QUERY_FUNC_RESP_SB_OPCODE_LAST CREQ_QUERY_FUNC_RESP_SB_OPCODE_QUERY_FUNC
+	u8 status;
+	__le16 cookie;
+	__le16 flags;
+	u8 resp_size;
+	u8 reserved8;
+	__le64 max_mr_size;
+	__le32 max_qp;
+	__le16 max_qp_wr;
+	__le16 dev_cap_flags;
+	#define CREQ_QUERY_FUNC_RESP_SB_RESIZE_QP 0x1UL
+	#define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_MASK 0xeUL
+	#define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_SFT 1
+	#define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN0 (0x0UL << 1)
+	#define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1 (0x1UL << 1)
+	#define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1_EXT (0x2UL << 1)
+	#define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_LAST \
+		CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1_EXT
+	#define CREQ_QUERY_FUNC_RESP_SB_EXT_STATS 0x10UL
+	#define CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC 0x20UL
+	#define CREQ_QUERY_FUNC_RESP_SB_OPTIMIZED_TRANSMIT_ENABLED 0x40UL
+	#define CREQ_QUERY_FUNC_RESP_SB_CQE_V2 0x80UL
+	#define CREQ_QUERY_FUNC_RESP_SB_PINGPONG_PUSH_MODE 0x100UL
+	#define CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED 0x200UL
+	#define CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED 0x400UL
+	__le32 max_cq;
+	__le32 max_cqe;
+	__le32 max_pd;
+	u8 max_sge;
+	u8 max_srq_sge;
+	u8 max_qp_rd_atom;
+	u8 max_qp_init_rd_atom;
+	__le32 max_mr;
+	__le32 max_mw;
+	__le32 max_raw_eth_qp;
+	__le32 max_ah;
+	__le32 max_fmr;
+	__le32 max_srq_wr;
+	__le32 max_pkeys;
+	__le32 max_inline_data;
+	u8 max_map_per_fmr;
+	u8 l2_db_space_size;
+	__le16 max_srq;
+	__le32 max_gid;
+	__le32 tqm_alloc_reqs[12];
+	__le32 max_dpi;
+	u8 max_sge_var_wqe;
+	u8 reserved_8;
+	__le16 max_inline_data_var_wqe;
+};
+
+/* cmdq_set_func_resources (size:448b/56B) */
+struct cmdq_set_func_resources {
+	u8 opcode;
+	#define CMDQ_SET_FUNC_RESOURCES_OPCODE_SET_FUNC_RESOURCES 0x84UL
+	#define CMDQ_SET_FUNC_RESOURCES_OPCODE_LAST\
+		CMDQ_SET_FUNC_RESOURCES_OPCODE_SET_FUNC_RESOURCES
+	u8 cmd_size;
+	__le16 flags;
+	#define CMDQ_SET_FUNC_RESOURCES_FLAGS_MRAV_RESERVATION_SPLIT 0x1UL
+	__le16 cookie;
+	u8 resp_size;
+	u8 reserved8;
+	__le64 resp_addr;
+	__le32 number_of_qp;
+	__le32 number_of_mrw;
+	__le32 number_of_srq;
+	__le32 number_of_cq;
+	__le32 max_qp_per_vf;
+	__le32 max_mrw_per_vf;
+	__le32 max_srq_per_vf;
+	__le32 max_cq_per_vf;
+	__le32 max_gid_per_vf;
+	__le32 stat_ctx_id;
+};
+
+/* creq_set_func_resources_resp (size:128b/16B) */
+struct creq_set_func_resources_resp {
+	u8 type;
+	#define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_MASK 0x3fUL
+	#define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_SFT 0
+	#define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_QP_EVENT 0x38UL
+	#define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_LAST CREQ_SET_FUNC_RESOURCES_RESP_TYPE_QP_EVENT
+	u8 status;
+	__le16 cookie;
+	__le32 reserved32;
+	u8 v;
+	#define CREQ_SET_FUNC_RESOURCES_RESP_V 0x1UL
+	u8 event;
+	#define CREQ_SET_FUNC_RESOURCES_RESP_EVENT_SET_FUNC_RESOURCES 0x84UL
+	#define CREQ_SET_FUNC_RESOURCES_RESP_EVENT_LAST \
+		CREQ_SET_FUNC_RESOURCES_RESP_EVENT_SET_FUNC_RESOURCES
+	u8 reserved48[6];
+};
+
+/* cmdq_map_tc_to_cos (size:192b/24B) */
+struct cmdq_map_tc_to_cos {
+	u8 opcode;
+	#define CMDQ_MAP_TC_TO_COS_OPCODE_MAP_TC_TO_COS 0x8aUL
+	#define CMDQ_MAP_TC_TO_COS_OPCODE_LAST CMDQ_MAP_TC_TO_COS_OPCODE_MAP_TC_TO_COS
+	u8 cmd_size;
+	__le16 flags;
+	__le16 cookie;
+	u8 resp_size;
+	u8 reserved8;
+	__le64 resp_addr;
+	__le16 cos0;
+	#define CMDQ_MAP_TC_TO_COS_COS0_NO_CHANGE 0xffffUL
+	#define CMDQ_MAP_TC_TO_COS_COS0_LAST CMDQ_MAP_TC_TO_COS_COS0_NO_CHANGE
+	__le16 cos1;
+	#define CMDQ_MAP_TC_TO_COS_COS1_DISABLE 0x8000UL
+	#define CMDQ_MAP_TC_TO_COS_COS1_NO_CHANGE 0xffffUL
+	#define CMDQ_MAP_TC_TO_COS_COS1_LAST CMDQ_MAP_TC_TO_COS_COS1_NO_CHANGE
+	__le32 unused_0;
+};
+
+/* creq_map_tc_to_cos_resp (size:128b/16B) */
+struct creq_map_tc_to_cos_resp {
+	u8 type;
+	#define CREQ_MAP_TC_TO_COS_RESP_TYPE_MASK 0x3fUL
+	#define
CREQ_MAP_TC_TO_COS_RESP_TYPE_SFT 0 + #define CREQ_MAP_TC_TO_COS_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_MAP_TC_TO_COS_RESP_TYPE_LAST CREQ_MAP_TC_TO_COS_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_MAP_TC_TO_COS_RESP_V 0x1UL + u8 event; + #define CREQ_MAP_TC_TO_COS_RESP_EVENT_MAP_TC_TO_COS 0x8aUL + #define CREQ_MAP_TC_TO_COS_RESP_EVENT_LAST CREQ_MAP_TC_TO_COS_RESP_EVENT_MAP_TC_TO_COS + u8 reserved48[6]; +}; + +/* cmdq_query_roce_cc (size:128b/16B) */ +struct cmdq_query_roce_cc { + u8 opcode; + #define CMDQ_QUERY_ROCE_CC_OPCODE_QUERY_ROCE_CC 0x8dUL + #define CMDQ_QUERY_ROCE_CC_OPCODE_LAST CMDQ_QUERY_ROCE_CC_OPCODE_QUERY_ROCE_CC + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; +}; + +/* creq_query_roce_cc_resp (size:128b/16B) */ +struct creq_query_roce_cc_resp { + u8 type; + #define CREQ_QUERY_ROCE_CC_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_ROCE_CC_RESP_TYPE_SFT 0 + #define CREQ_QUERY_ROCE_CC_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_ROCE_CC_RESP_TYPE_LAST CREQ_QUERY_ROCE_CC_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_ROCE_CC_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_ROCE_CC_RESP_EVENT_QUERY_ROCE_CC 0x8dUL + #define CREQ_QUERY_ROCE_CC_RESP_EVENT_LAST CREQ_QUERY_ROCE_CC_RESP_EVENT_QUERY_ROCE_CC + u8 reserved48[6]; +}; + +/* creq_query_roce_cc_resp_sb (size:256b/32B) */ +struct creq_query_roce_cc_resp_sb { + u8 opcode; + #define CREQ_QUERY_ROCE_CC_RESP_SB_OPCODE_QUERY_ROCE_CC 0x8dUL + #define CREQ_QUERY_ROCE_CC_RESP_SB_OPCODE_LAST \ + CREQ_QUERY_ROCE_CC_RESP_SB_OPCODE_QUERY_ROCE_CC + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 reserved8; + u8 enable_cc; + #define CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC 0x1UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_UNUSED7_MASK 0xfeUL + #define CREQ_QUERY_ROCE_CC_RESP_SB_UNUSED7_SFT 1 + u8 tos_dscp_tos_ecn; + #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK 0x3UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT 0 + #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK 0xfcUL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT 2 + u8 g; + u8 num_phases_per_state; + __le16 init_cr; + __le16 init_tr; + u8 alt_vlan_pcp; + #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_VLAN_PCP_MASK 0x7UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_VLAN_PCP_SFT 0 + #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD1_MASK 0xf8UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD1_SFT 3 + u8 alt_tos_dscp; + #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_TOS_DSCP_MASK 0x3fUL + #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_TOS_DSCP_SFT 0 + #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD4_MASK 0xc0UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD4_SFT 6 + u8 cc_mode; + #define CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_DCTCP 0x0UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_PROBABILISTIC 0x1UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_LAST \ + CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_PROBABILISTIC + u8 tx_queue; + __le16 rtt; + #define CREQ_QUERY_ROCE_CC_RESP_SB_RTT_MASK 0x3fffUL + #define CREQ_QUERY_ROCE_CC_RESP_SB_RTT_SFT 0 + #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD5_MASK 0xc000UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD5_SFT 14 + __le16 tcp_cp; + #define CREQ_QUERY_ROCE_CC_RESP_SB_TCP_CP_MASK 0x3ffUL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TCP_CP_SFT 0 + #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD6_MASK 0xfc00UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD6_SFT 10 + __le16 inactivity_th; + u8 pkts_per_phase; + u8 time_per_phase; + __le32 reserved32; +}; + +/* creq_query_roce_cc_resp_sb_tlv 
(size:384b/48B) */ +struct creq_query_roce_cc_resp_sb_tlv { + __le16 cmd_discr; + u8 reserved_8b; + u8 tlv_flags; + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST \ + CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; + u8 total_size; + u8 reserved56[7]; + u8 opcode; + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_OPCODE_QUERY_ROCE_CC 0x8dUL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_OPCODE_LAST \ + CREQ_QUERY_ROCE_CC_RESP_SB_TLV_OPCODE_QUERY_ROCE_CC + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 reserved8; + u8 enable_cc; + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ENABLE_CC 0x1UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_UNUSED7_MASK 0xfeUL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_UNUSED7_SFT 1 + u8 tos_dscp_tos_ecn; + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_ECN_MASK 0x3UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_ECN_SFT 0 + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_DSCP_MASK 0xfcUL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_DSCP_SFT 2 + u8 g; + u8 num_phases_per_state; + __le16 init_cr; + __le16 init_tr; + u8 alt_vlan_pcp; + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_VLAN_PCP_MASK 0x7UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_VLAN_PCP_SFT 0 + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD1_MASK 0xf8UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD1_SFT 3 + u8 alt_tos_dscp; + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_TOS_DSCP_MASK 0x3fUL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_TOS_DSCP_SFT 0 + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD4_MASK 0xc0UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD4_SFT 6 + u8 cc_mode; + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_DCTCP 0x0UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_PROBABILISTIC 0x1UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_LAST\ + CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_PROBABILISTIC + u8 tx_queue; + __le16 rtt; + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RTT_MASK 0x3fffUL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RTT_SFT 0 + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD5_MASK 0xc000UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD5_SFT 14 + __le16 tcp_cp; + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TCP_CP_MASK 0x3ffUL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TCP_CP_SFT 0 + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD6_MASK 0xfc00UL + #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD6_SFT 10 + __le16 inactivity_th; + u8 pkts_per_phase; + u8 time_per_phase; + __le32 reserved32; +}; + +/* creq_query_roce_cc_gen1_resp_sb_tlv (size:704b/88B) */ +struct creq_query_roce_cc_gen1_resp_sb_tlv { + __le16 cmd_discr; + u8 reserved_8b; + u8 tlv_flags; + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST \ 
+ CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; + __le64 reserved64; + __le16 inactivity_th_hi; + __le16 min_time_between_cnps; + __le16 init_cp; + u8 tr_update_mode; + u8 tr_update_cycles; + u8 fr_num_rtts; + u8 ai_rate_increase; + __le16 reduction_relax_rtts_th; + __le16 additional_relax_cr_th; + __le16 cr_min_th; + u8 bw_avg_weight; + u8 actual_cr_factor; + __le16 max_cp_cr_th; + u8 cp_bias_en; + u8 cp_bias; + u8 cnp_ecn; + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_NOT_ECT 0x0UL + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_ECT_1 0x1UL + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_ECT_0 0x2UL + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_LAST \ + CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_ECT_0 + u8 rtt_jitter_en; + __le16 link_bytes_per_usec; + __le16 reset_cc_cr_th; + u8 cr_width; + u8 quota_period_min; + u8 quota_period_max; + u8 quota_period_abs_max; + __le16 tr_lower_bound; + u8 cr_prob_factor; + u8 tr_prob_factor; + __le16 fairness_cr_th; + u8 red_div; + u8 cnp_ratio_th; + __le16 exp_ai_rtts; + u8 exp_ai_cr_cp_ratio; + u8 use_rate_table; + __le16 cp_exp_update_th; + __le16 high_exp_ai_rtts_th1; + __le16 high_exp_ai_rtts_th2; + __le16 actual_cr_cong_free_rtts_th; + __le16 severe_cong_cr_th1; + __le16 severe_cong_cr_th2; + __le32 link64B_per_rtt; + u8 cc_ack_bytes; + u8 reduce_init_en; + __le16 reduce_init_cong_free_rtts_th; + u8 random_no_red_en; + u8 actual_cr_shift_correction_en; + u8 quota_period_adjust_en; + u8 reserved[5]; +}; + +/* cmdq_modify_roce_cc (size:448b/56B) */ +struct cmdq_modify_roce_cc { + u8 opcode; + #define CMDQ_MODIFY_ROCE_CC_OPCODE_MODIFY_ROCE_CC 0x8cUL + #define CMDQ_MODIFY_ROCE_CC_OPCODE_LAST CMDQ_MODIFY_ROCE_CC_OPCODE_MODIFY_ROCE_CC + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 modify_mask; + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC 0x1UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_G 0x2UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_NUMPHASEPERSTATE 0x4UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_CR 0x8UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_TR 0x10UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN 0x20UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP 0x40UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP 0x80UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP 0x100UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_RTT 0x200UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE 0x400UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP 0x800UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TX_QUEUE 0x1000UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP 0x2000UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE 0x4000UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_PKTS_PER_PHASE 0x8000UL + u8 enable_cc; + #define CMDQ_MODIFY_ROCE_CC_ENABLE_CC 0x1UL + #define CMDQ_MODIFY_ROCE_CC_RSVD1_MASK 0xfeUL + #define CMDQ_MODIFY_ROCE_CC_RSVD1_SFT 1 + u8 g; + u8 num_phases_per_state; + u8 pkts_per_phase; + __le16 init_cr; + __le16 init_tr; + u8 tos_dscp_tos_ecn; + #define CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK 0x3UL + #define CMDQ_MODIFY_ROCE_CC_TOS_ECN_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_TOS_DSCP_MASK 0xfcUL + #define CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT 2 + u8 alt_vlan_pcp; + #define CMDQ_MODIFY_ROCE_CC_ALT_VLAN_PCP_MASK 0x7UL + #define CMDQ_MODIFY_ROCE_CC_ALT_VLAN_PCP_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_RSVD3_MASK 0xf8UL + #define CMDQ_MODIFY_ROCE_CC_RSVD3_SFT 3 + __le16 alt_tos_dscp; + #define 
CMDQ_MODIFY_ROCE_CC_ALT_TOS_DSCP_MASK 0x3fUL + #define CMDQ_MODIFY_ROCE_CC_ALT_TOS_DSCP_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_RSVD4_MASK 0xffc0UL + #define CMDQ_MODIFY_ROCE_CC_RSVD4_SFT 6 + __le16 rtt; + #define CMDQ_MODIFY_ROCE_CC_RTT_MASK 0x3fffUL + #define CMDQ_MODIFY_ROCE_CC_RTT_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_RSVD5_MASK 0xc000UL + #define CMDQ_MODIFY_ROCE_CC_RSVD5_SFT 14 + __le16 tcp_cp; + #define CMDQ_MODIFY_ROCE_CC_TCP_CP_MASK 0x3ffUL + #define CMDQ_MODIFY_ROCE_CC_TCP_CP_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_RSVD6_MASK 0xfc00UL + #define CMDQ_MODIFY_ROCE_CC_RSVD6_SFT 10 + u8 cc_mode; + #define CMDQ_MODIFY_ROCE_CC_CC_MODE_DCTCP_CC_MODE 0x0UL + #define CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE 0x1UL + #define CMDQ_MODIFY_ROCE_CC_CC_MODE_LAST CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE + u8 tx_queue; + __le16 inactivity_th; + u8 time_per_phase; + u8 reserved8_1; + __le16 reserved16; + __le32 reserved32; + __le64 reserved64; +}; + +/* cmdq_modify_roce_cc_tlv (size:640b/80B) */ +struct cmdq_modify_roce_cc_tlv { + __le16 cmd_discr; + u8 reserved_8b; + u8 tlv_flags; + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_MORE 0x1UL + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_MORE_LAST 0x0UL + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED 0x2UL + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_LAST \ + CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; + u8 total_size; + u8 reserved56[7]; + u8 opcode; + #define CMDQ_MODIFY_ROCE_CC_TLV_OPCODE_MODIFY_ROCE_CC 0x8cUL + #define CMDQ_MODIFY_ROCE_CC_TLV_OPCODE_LAST CMDQ_MODIFY_ROCE_CC_TLV_OPCODE_MODIFY_ROCE_CC + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 modify_mask; + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_ENABLE_CC 0x1UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_G 0x2UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_NUMPHASEPERSTATE 0x4UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_INIT_CR 0x8UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_INIT_TR 0x10UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TOS_ECN 0x20UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TOS_DSCP 0x40UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_ALT_VLAN_PCP 0x80UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_ALT_TOS_DSCP 0x100UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_RTT 0x200UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_CC_MODE 0x400UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TCP_CP 0x800UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TX_QUEUE 0x1000UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_INACTIVITY_CP 0x2000UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TIME_PER_PHASE 0x4000UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_PKTS_PER_PHASE 0x8000UL + u8 enable_cc; + #define CMDQ_MODIFY_ROCE_CC_TLV_ENABLE_CC 0x1UL + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD1_MASK 0xfeUL + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD1_SFT 1 + u8 g; + u8 num_phases_per_state; + u8 pkts_per_phase; + __le16 init_cr; + __le16 init_tr; + u8 tos_dscp_tos_ecn; + #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_ECN_MASK 0x3UL + #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_ECN_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_DSCP_MASK 0xfcUL + #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_DSCP_SFT 2 + u8 alt_vlan_pcp; + #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_VLAN_PCP_MASK 0x7UL + #define 
CMDQ_MODIFY_ROCE_CC_TLV_ALT_VLAN_PCP_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD3_MASK 0xf8UL + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD3_SFT 3 + __le16 alt_tos_dscp; + #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_TOS_DSCP_MASK 0x3fUL + #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_TOS_DSCP_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD4_MASK 0xffc0UL + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD4_SFT 6 + __le16 rtt; + #define CMDQ_MODIFY_ROCE_CC_TLV_RTT_MASK 0x3fffUL + #define CMDQ_MODIFY_ROCE_CC_TLV_RTT_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD5_MASK 0xc000UL + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD5_SFT 14 + __le16 tcp_cp; + #define CMDQ_MODIFY_ROCE_CC_TLV_TCP_CP_MASK 0x3ffUL + #define CMDQ_MODIFY_ROCE_CC_TLV_TCP_CP_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD6_MASK 0xfc00UL + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD6_SFT 10 + u8 cc_mode; + #define CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_DCTCP_CC_MODE 0x0UL + #define CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_PROBABILISTIC_CC_MODE 0x1UL + #define CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_LAST\ + CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_PROBABILISTIC_CC_MODE + u8 tx_queue; + __le16 inactivity_th; + u8 time_per_phase; + u8 reserved8_1; + __le16 reserved16; + __le32 reserved32; + __le64 reserved64; + __le64 reservedtlvpad; +}; + +/* cmdq_modify_roce_cc_gen1_tlv (size:768b/96B) */ +struct cmdq_modify_roce_cc_gen1_tlv { + __le16 cmd_discr; + u8 reserved_8b; + u8 tlv_flags; + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_MORE 0x1UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_MORE_LAST 0x0UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED 0x2UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_LAST\ + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; + __le64 reserved64; + __le64 modify_mask; + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_MIN_TIME_BETWEEN_CNPS 0x1UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_INIT_CP 0x2UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_UPDATE_MODE 0x4UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_UPDATE_CYCLES 0x8UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_FR_NUM_RTTS 0x10UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_AI_RATE_INCREASE 0x20UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCTION_RELAX_RTTS_TH 0x40UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ADDITIONAL_RELAX_CR_TH 0x80UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_MIN_TH 0x100UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_BW_AVG_WEIGHT 0x200UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_FACTOR 0x400UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_MAX_CP_CR_TH 0x800UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_BIAS_EN 0x1000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_BIAS 0x2000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CNP_ECN 0x4000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RTT_JITTER_EN 0x8000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_LINK_BYTES_PER_USEC 0x10000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RESET_CC_CR_TH 0x20000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_WIDTH 0x40000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_MIN 0x80000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_MAX 0x100000UL + #define 
CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_ABS_MAX 0x200000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_LOWER_BOUND 0x400000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_PROB_FACTOR 0x800000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_PROB_FACTOR 0x1000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_FAIRNESS_CR_TH 0x2000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RED_DIV 0x4000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CNP_RATIO_TH 0x8000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_EXP_AI_RTTS 0x10000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_EXP_AI_CR_CP_RATIO 0x20000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_EXP_UPDATE_TH 0x40000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_HIGH_EXP_AI_RTTS_TH1 0x80000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_HIGH_EXP_AI_RTTS_TH2 0x100000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_USE_RATE_TABLE 0x200000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_LINK64B_PER_RTT 0x400000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_CONG_FREE_RTTS_TH 0x800000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_SEVERE_CONG_CR_TH1 0x1000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_SEVERE_CONG_CR_TH2 0x2000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CC_ACK_BYTES 0x4000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCE_INIT_EN 0x8000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCE_INIT_CONG_FREE_RTTS_TH \ + 0x10000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RANDOM_NO_RED_EN 0x20000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_SHIFT_CORRECTION_EN \ + 0x40000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_ADJUST_EN 0x80000000000ULL + __le16 inactivity_th_hi; + __le16 min_time_between_cnps; + __le16 init_cp; + u8 tr_update_mode; + u8 tr_update_cycles; + u8 fr_num_rtts; + u8 ai_rate_increase; + __le16 reduction_relax_rtts_th; + __le16 additional_relax_cr_th; + __le16 cr_min_th; + u8 bw_avg_weight; + u8 actual_cr_factor; + __le16 max_cp_cr_th; + u8 cp_bias_en; + u8 cp_bias; + u8 cnp_ecn; + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_NOT_ECT 0x0UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_ECT_1 0x1UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_ECT_0 0x2UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_LAST CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_ECT_0 + u8 rtt_jitter_en; + __le16 link_bytes_per_usec; + __le16 reset_cc_cr_th; + u8 cr_width; + u8 quota_period_min; + u8 quota_period_max; + u8 quota_period_abs_max; + __le16 tr_lower_bound; + u8 cr_prob_factor; + u8 tr_prob_factor; + __le16 fairness_cr_th; + u8 red_div; + u8 cnp_ratio_th; + __le16 exp_ai_rtts; + u8 exp_ai_cr_cp_ratio; + u8 use_rate_table; + __le16 cp_exp_update_th; + __le16 high_exp_ai_rtts_th1; + __le16 high_exp_ai_rtts_th2; + __le16 actual_cr_cong_free_rtts_th; + __le16 severe_cong_cr_th1; + __le16 severe_cong_cr_th2; + __le32 link64B_per_rtt; + u8 cc_ack_bytes; + u8 reduce_init_en; + __le16 reduce_init_cong_free_rtts_th; + u8 random_no_red_en; + u8 actual_cr_shift_correction_en; + u8 quota_period_adjust_en; + u8 reserved[5]; +}; + +/* creq_modify_roce_cc_resp (size:128b/16B) */ +struct creq_modify_roce_cc_resp { + u8 type; + #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_MASK 0x3fUL + #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_SFT 0 + #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_QP_EVENT 0x38UL 
+ #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_LAST CREQ_MODIFY_ROCE_CC_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_MODIFY_ROCE_CC_RESP_V 0x1UL + u8 event; + #define CREQ_MODIFY_ROCE_CC_RESP_EVENT_MODIFY_ROCE_CC 0x8cUL + #define CREQ_MODIFY_ROCE_CC_RESP_EVENT_LAST CREQ_MODIFY_ROCE_CC_RESP_EVENT_MODIFY_ROCE_CC + u8 reserved48[6]; +}; + +/* cmdq_set_link_aggr_mode_cc (size:320b/40B) */ +struct cmdq_set_link_aggr_mode_cc { + u8 opcode; + #define CMDQ_SET_LINK_AGGR_MODE_OPCODE_SET_LINK_AGGR_MODE 0x8fUL + #define CMDQ_SET_LINK_AGGR_MODE_OPCODE_LAST \ + CMDQ_SET_LINK_AGGR_MODE_OPCODE_SET_LINK_AGGR_MODE + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 modify_mask; + #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_EN 0x1UL + #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_ACTIVE_PORT_MAP 0x2UL + #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_MEMBER_PORT_MAP 0x4UL + #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_MODE 0x8UL + #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_STAT_CTX_ID 0x10UL + u8 aggr_enable; + #define CMDQ_SET_LINK_AGGR_MODE_AGGR_ENABLE 0x1UL + #define CMDQ_SET_LINK_AGGR_MODE_RSVD1_MASK 0xfeUL + #define CMDQ_SET_LINK_AGGR_MODE_RSVD1_SFT 1 + u8 active_port_map; + #define CMDQ_SET_LINK_AGGR_MODE_ACTIVE_PORT_MAP_MASK 0xfUL + #define CMDQ_SET_LINK_AGGR_MODE_ACTIVE_PORT_MAP_SFT 0 + #define CMDQ_SET_LINK_AGGR_MODE_RSVD2_MASK 0xf0UL + #define CMDQ_SET_LINK_AGGR_MODE_RSVD2_SFT 4 + u8 member_port_map; + u8 link_aggr_mode; + #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_ACTIVE_ACTIVE 0x1UL + #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_ACTIVE_BACKUP 0x2UL + #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_BALANCE_XOR 0x3UL + #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_802_3_AD 0x4UL + #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_LAST CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_802_3_AD + __le16 stat_ctx_id[4]; + __le64 rsvd1; +}; + +/* creq_set_link_aggr_mode_resources_resp (size:128b/16B) */ +struct creq_set_link_aggr_mode_resources_resp { + u8 type; + #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_MASK 0x3fUL + #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_SFT 0 + #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_LAST CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_SET_LINK_AGGR_MODE_RESP_V 0x1UL + u8 event; + #define CREQ_SET_LINK_AGGR_MODE_RESP_EVENT_SET_LINK_AGGR_MODE 0x8fUL + #define CREQ_SET_LINK_AGGR_MODE_RESP_EVENT_LAST\ + CREQ_SET_LINK_AGGR_MODE_RESP_EVENT_SET_LINK_AGGR_MODE + u8 reserved48[6]; +}; + +/* creq_func_event (size:128b/16B) */ +struct creq_func_event { + u8 type; + #define CREQ_FUNC_EVENT_TYPE_MASK 0x3fUL + #define CREQ_FUNC_EVENT_TYPE_SFT 0 + #define CREQ_FUNC_EVENT_TYPE_FUNC_EVENT 0x3aUL + #define CREQ_FUNC_EVENT_TYPE_LAST CREQ_FUNC_EVENT_TYPE_FUNC_EVENT + u8 reserved56[7]; + u8 v; + #define CREQ_FUNC_EVENT_V 0x1UL + u8 event; + #define CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR 0x1UL + #define CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR 0x2UL + #define CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR 0x3UL + #define CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR 0x4UL + #define CREQ_FUNC_EVENT_EVENT_CQ_ERROR 0x5UL + #define CREQ_FUNC_EVENT_EVENT_TQM_ERROR 0x6UL + #define CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR 0x7UL + #define CREQ_FUNC_EVENT_EVENT_CFCS_ERROR 0x8UL + #define CREQ_FUNC_EVENT_EVENT_CFCC_ERROR 0x9UL + #define CREQ_FUNC_EVENT_EVENT_CFCM_ERROR 0xaUL + #define CREQ_FUNC_EVENT_EVENT_TIM_ERROR 0xbUL + #define 
CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST 0x80UL
+	#define CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED 0x81UL
+	#define CREQ_FUNC_EVENT_EVENT_LAST CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED
+	u8 reserved48[6];
+};
+
+/* creq_qp_event (size:128b/16B) */
+struct creq_qp_event {
+	u8 type;
+	#define CREQ_QP_EVENT_TYPE_MASK 0x3fUL
+	#define CREQ_QP_EVENT_TYPE_SFT 0
+	#define CREQ_QP_EVENT_TYPE_QP_EVENT 0x38UL
+	#define CREQ_QP_EVENT_TYPE_LAST CREQ_QP_EVENT_TYPE_QP_EVENT
+	u8 status;
+	#define CREQ_QP_EVENT_STATUS_SUCCESS 0x0UL
+	#define CREQ_QP_EVENT_STATUS_FAIL 0x1UL
+	#define CREQ_QP_EVENT_STATUS_RESOURCES 0x2UL
+	#define CREQ_QP_EVENT_STATUS_INVALID_CMD 0x3UL
+	#define CREQ_QP_EVENT_STATUS_NOT_IMPLEMENTED 0x4UL
+	#define CREQ_QP_EVENT_STATUS_INVALID_PARAMETER 0x5UL
+	#define CREQ_QP_EVENT_STATUS_HARDWARE_ERROR 0x6UL
+	#define CREQ_QP_EVENT_STATUS_INTERNAL_ERROR 0x7UL
+	#define CREQ_QP_EVENT_STATUS_LAST CREQ_QP_EVENT_STATUS_INTERNAL_ERROR
+	__le16 cookie;
+	__le32 reserved32;
+	u8 v;
+	#define CREQ_QP_EVENT_V 0x1UL
+	u8 event;
+	#define CREQ_QP_EVENT_EVENT_CREATE_QP 0x1UL
+	#define CREQ_QP_EVENT_EVENT_DESTROY_QP 0x2UL
+	#define CREQ_QP_EVENT_EVENT_MODIFY_QP 0x3UL
+	#define CREQ_QP_EVENT_EVENT_QUERY_QP 0x4UL
+	#define CREQ_QP_EVENT_EVENT_CREATE_SRQ 0x5UL
+	#define CREQ_QP_EVENT_EVENT_DESTROY_SRQ 0x6UL
+	#define CREQ_QP_EVENT_EVENT_QUERY_SRQ 0x8UL
+	#define CREQ_QP_EVENT_EVENT_CREATE_CQ 0x9UL
+	#define CREQ_QP_EVENT_EVENT_DESTROY_CQ 0xaUL
+	#define CREQ_QP_EVENT_EVENT_RESIZE_CQ 0xcUL
+	#define CREQ_QP_EVENT_EVENT_ALLOCATE_MRW 0xdUL
+	#define CREQ_QP_EVENT_EVENT_DEALLOCATE_KEY 0xeUL
+	#define CREQ_QP_EVENT_EVENT_REGISTER_MR 0xfUL
+	#define CREQ_QP_EVENT_EVENT_DEREGISTER_MR 0x10UL
+	#define CREQ_QP_EVENT_EVENT_ADD_GID 0x11UL
+	#define CREQ_QP_EVENT_EVENT_DELETE_GID 0x12UL
+	#define CREQ_QP_EVENT_EVENT_MODIFY_GID 0x17UL
+	#define CREQ_QP_EVENT_EVENT_QUERY_GID 0x18UL
+	#define CREQ_QP_EVENT_EVENT_CREATE_QP1 0x13UL
+	#define CREQ_QP_EVENT_EVENT_DESTROY_QP1 0x14UL
+	#define CREQ_QP_EVENT_EVENT_CREATE_AH 0x15UL
+	#define CREQ_QP_EVENT_EVENT_DESTROY_AH 0x16UL
+	#define CREQ_QP_EVENT_EVENT_INITIALIZE_FW 0x80UL
+	#define CREQ_QP_EVENT_EVENT_DEINITIALIZE_FW 0x81UL
+	#define CREQ_QP_EVENT_EVENT_STOP_FUNC 0x82UL
+	#define CREQ_QP_EVENT_EVENT_QUERY_FUNC 0x83UL
+	#define CREQ_QP_EVENT_EVENT_SET_FUNC_RESOURCES 0x84UL
+	#define CREQ_QP_EVENT_EVENT_READ_CONTEXT 0x85UL
+	#define CREQ_QP_EVENT_EVENT_MAP_TC_TO_COS 0x8aUL
+	#define CREQ_QP_EVENT_EVENT_QUERY_VERSION 0x8bUL
+	#define CREQ_QP_EVENT_EVENT_MODIFY_CC 0x8cUL
+	#define CREQ_QP_EVENT_EVENT_QUERY_CC 0x8dUL
+	#define CREQ_QP_EVENT_EVENT_QUERY_ROCE_STATS 0x8eUL
+	#define CREQ_QP_EVENT_EVENT_SET_LINK_AGGR_MODE 0x8fUL
+	#define CREQ_QP_EVENT_EVENT_QUERY_QP_EXTEND 0x91UL
+	#define CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION 0xc0UL
+	#define CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION 0xc1UL
+	#define CREQ_QP_EVENT_EVENT_LAST CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION
+	u8 reserved48[6];
+};
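For orientation, a short sketch that is not part of the patch: all CREQ completions share this 16-byte shape, so a consumer can mask the low six bits of type, check the status code, and dispatch on event. The function name below is hypothetical; the macros are the ones defined just above.

static bool sample_qp_create_succeeded(const struct creq_qp_event *ev)
{
	/* The low 6 bits of type carry the event class (TYPE_MASK/TYPE_SFT). */
	if ((ev->type & CREQ_QP_EVENT_TYPE_MASK) != CREQ_QP_EVENT_TYPE_QP_EVENT)
		return false;
	/* event names the command that completed; status is its result code. */
	return ev->event == CREQ_QP_EVENT_EVENT_CREATE_QP &&
	       ev->status == CREQ_QP_EVENT_STATUS_SUCCESS;
}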
+
+/* creq_qp_error_notification (size:128b/16B) */
+struct creq_qp_error_notification {
+	u8 type;
+	#define CREQ_QP_ERROR_NOTIFICATION_TYPE_MASK 0x3fUL
+	#define CREQ_QP_ERROR_NOTIFICATION_TYPE_SFT 0
+	#define CREQ_QP_ERROR_NOTIFICATION_TYPE_QP_EVENT 0x38UL
+	#define CREQ_QP_ERROR_NOTIFICATION_TYPE_LAST CREQ_QP_ERROR_NOTIFICATION_TYPE_QP_EVENT
+	u8 status;
+	u8 req_slow_path_state;
+	u8 req_err_state_reason;
+	__le32 xid;
+	u8 v;
+	#define CREQ_QP_ERROR_NOTIFICATION_V 0x1UL
+	u8 event;
+	#define CREQ_QP_ERROR_NOTIFICATION_EVENT_QP_ERROR_NOTIFICATION 0xc0UL
+	#define CREQ_QP_ERROR_NOTIFICATION_EVENT_LAST \
+		CREQ_QP_ERROR_NOTIFICATION_EVENT_QP_ERROR_NOTIFICATION
+	u8 res_slow_path_state;
+	u8 res_err_state_reason;
+	__le16 sq_cons_idx;
+	__le16 rq_cons_idx;
+};
+
+/* creq_cq_error_notification (size:128b/16B) */
+struct creq_cq_error_notification {
+	u8 type;
+	#define CREQ_CQ_ERROR_NOTIFICATION_TYPE_MASK 0x3fUL
+	#define CREQ_CQ_ERROR_NOTIFICATION_TYPE_SFT 0
+	#define CREQ_CQ_ERROR_NOTIFICATION_TYPE_CQ_EVENT 0x38UL
+	#define CREQ_CQ_ERROR_NOTIFICATION_TYPE_LAST CREQ_CQ_ERROR_NOTIFICATION_TYPE_CQ_EVENT
+	u8 status;
+	u8 cq_err_reason;
+	#define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_INVALID_ERROR 0x1UL
+	#define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_OVERFLOW_ERROR 0x2UL
+	#define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_LOAD_ERROR 0x3UL
+	#define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_INVALID_ERROR 0x4UL
+	#define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_OVERFLOW_ERROR 0x5UL
+	#define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR 0x6UL
+	#define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_LAST \
+		CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR
+	u8 reserved8;
+	__le32 xid;
+	u8 v;
+	#define CREQ_CQ_ERROR_NOTIFICATION_V 0x1UL
+	u8 event;
+	#define CREQ_CQ_ERROR_NOTIFICATION_EVENT_CQ_ERROR_NOTIFICATION 0xc1UL
+	#define CREQ_CQ_ERROR_NOTIFICATION_EVENT_LAST \
+		CREQ_CQ_ERROR_NOTIFICATION_EVENT_CQ_ERROR_NOTIFICATION
+	u8 reserved48[6];
+};
+
+/* sq_base (size:64b/8B) */
+struct sq_base {
+	u8 wqe_type;
+	#define SQ_BASE_WQE_TYPE_SEND 0x0UL
+	#define SQ_BASE_WQE_TYPE_SEND_W_IMMEAD 0x1UL
+	#define SQ_BASE_WQE_TYPE_SEND_W_INVALID 0x2UL
+	#define SQ_BASE_WQE_TYPE_WRITE_WQE 0x4UL
+	#define SQ_BASE_WQE_TYPE_WRITE_W_IMMEAD 0x5UL
+	#define SQ_BASE_WQE_TYPE_READ_WQE 0x6UL
+	#define SQ_BASE_WQE_TYPE_ATOMIC_CS 0x8UL
+	#define SQ_BASE_WQE_TYPE_ATOMIC_FA 0xbUL
+	#define SQ_BASE_WQE_TYPE_LOCAL_INVALID 0xcUL
+	#define SQ_BASE_WQE_TYPE_FR_PMR 0xdUL
+	#define SQ_BASE_WQE_TYPE_BIND 0xeUL
+	#define SQ_BASE_WQE_TYPE_FR_PPMR 0xfUL
+	#define SQ_BASE_WQE_TYPE_LAST SQ_BASE_WQE_TYPE_FR_PPMR
+	u8 unused_0[7];
+};
+
+/* sq_sge (size:128b/16B) */
+struct sq_sge {
+	__le64 va_or_pa;
+	__le32 l_key;
+	__le32 size;
+};
+
+/* sq_psn_search (size:64b/8B) */
+struct sq_psn_search {
+	__le32 opcode_start_psn;
+	#define SQ_PSN_SEARCH_START_PSN_MASK 0xffffffUL
+	#define SQ_PSN_SEARCH_START_PSN_SFT 0
+	#define SQ_PSN_SEARCH_OPCODE_MASK 0xff000000UL
+	#define SQ_PSN_SEARCH_OPCODE_SFT 24
+	__le32 flags_next_psn;
+	#define SQ_PSN_SEARCH_NEXT_PSN_MASK 0xffffffUL
+	#define SQ_PSN_SEARCH_NEXT_PSN_SFT 0
+	#define SQ_PSN_SEARCH_FLAGS_MASK 0xff000000UL
+	#define SQ_PSN_SEARCH_FLAGS_SFT 24
+};
+
+/* sq_psn_search_ext (size:128b/16B) */
+struct sq_psn_search_ext {
+	__le32 opcode_start_psn;
+	#define SQ_PSN_SEARCH_EXT_START_PSN_MASK 0xffffffUL
+	#define SQ_PSN_SEARCH_EXT_START_PSN_SFT 0
+	#define SQ_PSN_SEARCH_EXT_OPCODE_MASK 0xff000000UL
+	#define SQ_PSN_SEARCH_EXT_OPCODE_SFT 24
+	__le32 flags_next_psn;
+	#define SQ_PSN_SEARCH_EXT_NEXT_PSN_MASK 0xffffffUL
+	#define SQ_PSN_SEARCH_EXT_NEXT_PSN_SFT 0
+	#define SQ_PSN_SEARCH_EXT_FLAGS_MASK 0xff000000UL
+	#define SQ_PSN_SEARCH_EXT_FLAGS_SFT 24
+	__le16 start_slot_idx;
+	__le16 reserved16;
+	__le32 reserved32;
+};
+
+/* sq_send (size:1024b/128B) */
+struct sq_send {
+	u8 wqe_type;
+	#define SQ_SEND_WQE_TYPE_SEND 0x0UL
+	#define SQ_SEND_WQE_TYPE_SEND_W_IMMEAD 0x1UL
+	#define SQ_SEND_WQE_TYPE_SEND_W_INVALID 0x2UL
+	#define SQ_SEND_WQE_TYPE_LAST SQ_SEND_WQE_TYPE_SEND_W_INVALID
+	u8 flags;
+	#define
SQ_SEND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_SEND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_SEND_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_SEND_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_SEND_FLAGS_UC_FENCE 0x4UL + #define SQ_SEND_FLAGS_SE 0x8UL + #define SQ_SEND_FLAGS_INLINE 0x10UL + #define SQ_SEND_FLAGS_WQE_TS_EN 0x20UL + #define SQ_SEND_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + u8 reserved8_1; + __le32 inv_key_or_imm_data; + __le32 length; + __le32 q_key; + __le32 dst_qp; + #define SQ_SEND_DST_QP_MASK 0xffffffUL + #define SQ_SEND_DST_QP_SFT 0 + __le32 avid; + #define SQ_SEND_AVID_MASK 0xfffffUL + #define SQ_SEND_AVID_SFT 0 + __le32 reserved32; + __le32 timestamp; + #define SQ_SEND_TIMESTAMP_MASK 0xffffffUL + #define SQ_SEND_TIMESTAMP_SFT 0 + __le32 data[24]; +}; + +/* sq_send_hdr (size:256b/32B) */ +struct sq_send_hdr { + u8 wqe_type; + #define SQ_SEND_HDR_WQE_TYPE_SEND 0x0UL + #define SQ_SEND_HDR_WQE_TYPE_SEND_W_IMMEAD 0x1UL + #define SQ_SEND_HDR_WQE_TYPE_SEND_W_INVALID 0x2UL + #define SQ_SEND_HDR_WQE_TYPE_LAST SQ_SEND_HDR_WQE_TYPE_SEND_W_INVALID + u8 flags; + #define SQ_SEND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_SEND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_SEND_HDR_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_SEND_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_SEND_HDR_FLAGS_UC_FENCE 0x4UL + #define SQ_SEND_HDR_FLAGS_SE 0x8UL + #define SQ_SEND_HDR_FLAGS_INLINE 0x10UL + #define SQ_SEND_HDR_FLAGS_WQE_TS_EN 0x20UL + #define SQ_SEND_HDR_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + u8 reserved8_1; + __le32 inv_key_or_imm_data; + __le32 length; + __le32 q_key; + __le32 dst_qp; + #define SQ_SEND_HDR_DST_QP_MASK 0xffffffUL + #define SQ_SEND_HDR_DST_QP_SFT 0 + __le32 avid; + #define SQ_SEND_HDR_AVID_MASK 0xfffffUL + #define SQ_SEND_HDR_AVID_SFT 0 + __le32 reserved32; + __le32 timestamp; + #define SQ_SEND_HDR_TIMESTAMP_MASK 0xffffffUL + #define SQ_SEND_HDR_TIMESTAMP_SFT 0 +}; + +/* sq_send_raweth_qp1 (size:1024b/128B) */ +struct sq_send_raweth_qp1 { + u8 wqe_type; + #define SQ_SEND_RAWETH_QP1_WQE_TYPE_SEND 0x0UL + #define SQ_SEND_RAWETH_QP1_WQE_TYPE_LAST SQ_SEND_RAWETH_QP1_WQE_TYPE_SEND + u8 flags; + #define SQ_SEND_RAWETH_QP1_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK \ + 0xffUL + #define SQ_SEND_RAWETH_QP1_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT \ + 0 + #define SQ_SEND_RAWETH_QP1_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_SEND_RAWETH_QP1_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_SEND_RAWETH_QP1_FLAGS_UC_FENCE 0x4UL + #define SQ_SEND_RAWETH_QP1_FLAGS_SE 0x8UL + #define SQ_SEND_RAWETH_QP1_FLAGS_INLINE 0x10UL + #define SQ_SEND_RAWETH_QP1_FLAGS_WQE_TS_EN 0x20UL + #define SQ_SEND_RAWETH_QP1_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + u8 reserved8; + __le16 lflags; + #define SQ_SEND_RAWETH_QP1_LFLAGS_TCP_UDP_CHKSUM 0x1UL + #define SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM 0x2UL + #define SQ_SEND_RAWETH_QP1_LFLAGS_NOCRC 0x4UL + #define SQ_SEND_RAWETH_QP1_LFLAGS_STAMP 0x8UL + #define SQ_SEND_RAWETH_QP1_LFLAGS_T_IP_CHKSUM 0x10UL + #define SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC 0x100UL + #define SQ_SEND_RAWETH_QP1_LFLAGS_FCOE_CRC 0x200UL + __le16 cfa_action; + __le32 length; + __le32 reserved32_1; + __le32 cfa_meta; + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK 0xfffUL + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT 0 + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_DE 0x1000UL + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_MASK 
0xe000UL + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_SFT 13 + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_MASK 0x70000UL + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_SFT 16 + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16) + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16) + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16) + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16) + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16) + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16) + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_LAST\ + SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPIDCFG + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_RESERVED_MASK 0xff80000UL + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_RESERVED_SFT 19 + #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_MASK 0xf0000000UL + #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_SFT 28 + #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_NONE (0x0UL << 28) + #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_VLAN_TAG (0x1UL << 28) + #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_LAST SQ_SEND_RAWETH_QP1_CFA_META_KEY_VLAN_TAG + __le32 reserved32_2; + __le32 reserved32_3; + __le32 timestamp; + #define SQ_SEND_RAWETH_QP1_TIMESTAMP_MASK 0xffffffUL + #define SQ_SEND_RAWETH_QP1_TIMESTAMP_SFT 0 + __le32 data[24]; +}; + +/* sq_send_raweth_qp1_hdr (size:256b/32B) */ +struct sq_send_raweth_qp1_hdr { + u8 wqe_type; + #define SQ_SEND_RAWETH_QP1_HDR_WQE_TYPE_SEND 0x0UL + #define SQ_SEND_RAWETH_QP1_HDR_WQE_TYPE_LAST SQ_SEND_RAWETH_QP1_HDR_WQE_TYPE_SEND + u8 flags; + #define \ + SQ_SEND_RAWETH_QP1_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT\ + 0 + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_UC_FENCE 0x4UL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_SE 0x8UL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_INLINE 0x10UL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_WQE_TS_EN 0x20UL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + u8 reserved8; + __le16 lflags; + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_TCP_UDP_CHKSUM 0x1UL + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_IP_CHKSUM 0x2UL + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_NOCRC 0x4UL + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_STAMP 0x8UL + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_T_IP_CHKSUM 0x10UL + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_ROCE_CRC 0x100UL + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_FCOE_CRC 0x200UL + __le16 cfa_action; + __le32 length; + __le32 reserved32_1; + __le32 cfa_meta; + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_VID_MASK 0xfffUL + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_VID_SFT 0 + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_DE 0x1000UL + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_PRI_MASK 0xe000UL + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_PRI_SFT 13 + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_MASK 0x70000UL + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_SFT 16 + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16) + #define 
SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_LAST\ + SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPIDCFG + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_RESERVED_MASK 0xff80000UL + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_RESERVED_SFT 19 + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_MASK 0xf0000000UL + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_SFT 28 + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_NONE (0x0UL << 28) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_VLAN_TAG (0x1UL << 28) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_LAST\ + SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_VLAN_TAG + __le32 reserved32_2; + __le32 reserved32_3; + __le32 timestamp; + #define SQ_SEND_RAWETH_QP1_HDR_TIMESTAMP_MASK 0xffffffUL + #define SQ_SEND_RAWETH_QP1_HDR_TIMESTAMP_SFT 0 +}; + +/* sq_rdma (size:1024b/128B) */ +struct sq_rdma { + u8 wqe_type; + #define SQ_RDMA_WQE_TYPE_WRITE_WQE 0x4UL + #define SQ_RDMA_WQE_TYPE_WRITE_W_IMMEAD 0x5UL + #define SQ_RDMA_WQE_TYPE_READ_WQE 0x6UL + #define SQ_RDMA_WQE_TYPE_LAST SQ_RDMA_WQE_TYPE_READ_WQE + u8 flags; + #define SQ_RDMA_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_RDMA_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_RDMA_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_RDMA_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_RDMA_FLAGS_UC_FENCE 0x4UL + #define SQ_RDMA_FLAGS_SE 0x8UL + #define SQ_RDMA_FLAGS_INLINE 0x10UL + #define SQ_RDMA_FLAGS_WQE_TS_EN 0x20UL + #define SQ_RDMA_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + u8 reserved8; + __le32 imm_data; + __le32 length; + __le32 reserved32_1; + __le64 remote_va; + __le32 remote_key; + __le32 timestamp; + #define SQ_RDMA_TIMESTAMP_MASK 0xffffffUL + #define SQ_RDMA_TIMESTAMP_SFT 0 + __le32 data[24]; }; -/* creq_query_roce_stats_ext_resp_sb (size:1536b/192B) */ -struct creq_query_roce_stats_ext_resp_sb { - u8 opcode; - #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL - #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_LAST \ - CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT - u8 status; - __le16 cookie; - __le16 flags; - u8 resp_size; - u8 rsvd; - __le64 tx_atomic_req_pkts; - __le64 tx_read_req_pkts; - __le64 tx_read_res_pkts; - __le64 tx_write_req_pkts; - __le64 tx_send_req_pkts; - __le64 tx_roce_pkts; - __le64 tx_roce_bytes; - __le64 rx_atomic_req_pkts; - __le64 rx_read_req_pkts; - __le64 rx_read_res_pkts; - __le64 rx_write_req_pkts; - __le64 rx_send_req_pkts; - __le64 rx_roce_pkts; - __le64 rx_roce_bytes; - __le64 rx_roce_good_pkts; - __le64 rx_roce_good_bytes; - __le64 rx_out_of_buffer_pkts; - __le64 rx_out_of_sequence_pkts; - __le64 tx_cnp_pkts; - __le64 rx_cnp_pkts; - __le64 rx_ecn_marked_pkts; - __le64 tx_cnp_bytes; - __le64 rx_cnp_bytes; -}; - -/* QP error notification event (16 bytes) */ -struct creq_qp_error_notification { - u8 type; - #define CREQ_QP_ERROR_NOTIFICATION_TYPE_MASK 0x3fUL - #define CREQ_QP_ERROR_NOTIFICATION_TYPE_SFT 0 - #define CREQ_QP_ERROR_NOTIFICATION_TYPE_QP_EVENT 0x38UL - #define CREQ_QP_ERROR_NOTIFICATION_RESERVED2_MASK 0xc0UL - #define CREQ_QP_ERROR_NOTIFICATION_RESERVED2_SFT 6 - u8 status; - u8 req_slow_path_state; - u8 req_err_state_reason; - __le32 xid; - u8 v; - #define CREQ_QP_ERROR_NOTIFICATION_V 0x1UL - #define CREQ_QP_ERROR_NOTIFICATION_RESERVED7_MASK 0xfeUL - #define CREQ_QP_ERROR_NOTIFICATION_RESERVED7_SFT 1 - u8 event; - #define CREQ_QP_ERROR_NOTIFICATION_EVENT_QP_ERROR_NOTIFICATION 0xc0UL - u8 res_slow_path_state; - u8 
res_err_state_reason;
-	__le16 sq_cons_idx;
-	__le16 rq_cons_idx;
+/* sq_rdma_hdr (size:256b/32B) */
+struct sq_rdma_hdr {
+	u8 wqe_type;
+	#define SQ_RDMA_HDR_WQE_TYPE_WRITE_WQE 0x4UL
+	#define SQ_RDMA_HDR_WQE_TYPE_WRITE_W_IMMEAD 0x5UL
+	#define SQ_RDMA_HDR_WQE_TYPE_READ_WQE 0x6UL
+	#define SQ_RDMA_HDR_WQE_TYPE_LAST SQ_RDMA_HDR_WQE_TYPE_READ_WQE
+	u8 flags;
+	#define SQ_RDMA_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+	#define SQ_RDMA_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+	#define SQ_RDMA_HDR_FLAGS_SIGNAL_COMP 0x1UL
+	#define SQ_RDMA_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+	#define SQ_RDMA_HDR_FLAGS_UC_FENCE 0x4UL
+	#define SQ_RDMA_HDR_FLAGS_SE 0x8UL
+	#define SQ_RDMA_HDR_FLAGS_INLINE 0x10UL
+	#define SQ_RDMA_HDR_FLAGS_WQE_TS_EN 0x20UL
+	#define SQ_RDMA_HDR_FLAGS_DEBUG_TRACE 0x40UL
+	u8 wqe_size;
+	u8 reserved8;
+	__le32 imm_data;
+	__le32 length;
+	__le32 reserved32_1;
+	__le64 remote_va;
+	__le32 remote_key;
+	__le32 timestamp;
+	#define SQ_RDMA_HDR_TIMESTAMP_MASK 0xffffffUL
+	#define SQ_RDMA_HDR_TIMESTAMP_SFT 0
 };
-/* RoCE Slowpath HSI Specification 1.6.0 */
-#define ROCE_SP_HSI_VERSION_MAJOR 1
-#define ROCE_SP_HSI_VERSION_MINOR 6
-#define ROCE_SP_HSI_VERSION_UPDATE 0
+/* sq_atomic (size:1024b/128B) */
+struct sq_atomic {
+	u8 wqe_type;
+	#define SQ_ATOMIC_WQE_TYPE_ATOMIC_CS 0x8UL
+	#define SQ_ATOMIC_WQE_TYPE_ATOMIC_FA 0xbUL
+	#define SQ_ATOMIC_WQE_TYPE_LAST SQ_ATOMIC_WQE_TYPE_ATOMIC_FA
+	u8 flags;
+	#define SQ_ATOMIC_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+	#define SQ_ATOMIC_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+	#define SQ_ATOMIC_FLAGS_SIGNAL_COMP 0x1UL
+	#define SQ_ATOMIC_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+	#define SQ_ATOMIC_FLAGS_UC_FENCE 0x4UL
+	#define SQ_ATOMIC_FLAGS_SE 0x8UL
+	#define SQ_ATOMIC_FLAGS_INLINE 0x10UL
+	#define SQ_ATOMIC_FLAGS_WQE_TS_EN 0x20UL
+	#define SQ_ATOMIC_FLAGS_DEBUG_TRACE 0x40UL
+	__le16 reserved16;
+	__le32 remote_key;
+	__le64 remote_va;
+	__le64 swap_data;
+	__le64 cmp_data;
+	__le32 data[24];
+};
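The atomic WQEs carry their operands in fixed little-endian fields rather than an SGE list. A minimal sketch, outside the patch itself, of filling a compare-and-swap request; the function name and the signalled-completion choice are illustrative only, and cpu_to_le32()/cpu_to_le64() are the stock kernel helpers:

static void sample_fill_atomic_cs(struct sq_atomic *wqe, u64 remote_va,
				  u32 rkey, u64 swap, u64 cmp)
{
	wqe->wqe_type = SQ_ATOMIC_WQE_TYPE_ATOMIC_CS;
	wqe->flags = SQ_ATOMIC_FLAGS_SIGNAL_COMP;	/* request a CQE */
	wqe->remote_key = cpu_to_le32(rkey);
	wqe->remote_va = cpu_to_le64(remote_va);
	wqe->swap_data = cpu_to_le64(swap);
	wqe->cmp_data = cpu_to_le64(cmp);
}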
-#define ROCE_SP_HSI_VERSION_STR "1.6.0"
-/*
- * Following is the signature for ROCE_SP_HSI message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
- */
-#define ROCE_SP_HSI_NA_SIGNATURE ((__le32)(-1))
-#endif /* __BNXT_RE_HSI_H__ */
+/* sq_atomic_hdr (size:256b/32B) */
+struct sq_atomic_hdr {
+	u8 wqe_type;
+	#define SQ_ATOMIC_HDR_WQE_TYPE_ATOMIC_CS 0x8UL
+	#define SQ_ATOMIC_HDR_WQE_TYPE_ATOMIC_FA 0xbUL
+	#define SQ_ATOMIC_HDR_WQE_TYPE_LAST SQ_ATOMIC_HDR_WQE_TYPE_ATOMIC_FA
+	u8 flags;
+	#define SQ_ATOMIC_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+	#define SQ_ATOMIC_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+	#define SQ_ATOMIC_HDR_FLAGS_SIGNAL_COMP 0x1UL
+	#define SQ_ATOMIC_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+	#define SQ_ATOMIC_HDR_FLAGS_UC_FENCE 0x4UL
+	#define SQ_ATOMIC_HDR_FLAGS_SE 0x8UL
+	#define SQ_ATOMIC_HDR_FLAGS_INLINE 0x10UL
+	#define SQ_ATOMIC_HDR_FLAGS_WQE_TS_EN 0x20UL
+	#define SQ_ATOMIC_HDR_FLAGS_DEBUG_TRACE 0x40UL
+	__le16 reserved16;
+	__le32 remote_key;
+	__le64 remote_va;
+	__le64 swap_data;
+	__le64 cmp_data;
+};
+
+/* sq_localinvalidate (size:1024b/128B) */
+struct sq_localinvalidate {
+	u8 wqe_type;
+	#define SQ_LOCALINVALIDATE_WQE_TYPE_LOCAL_INVALID 0xcUL
+	#define SQ_LOCALINVALIDATE_WQE_TYPE_LAST SQ_LOCALINVALIDATE_WQE_TYPE_LOCAL_INVALID
+	u8 flags;
+	#define SQ_LOCALINVALIDATE_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK\
+		0xffUL
+	#define SQ_LOCALINVALIDATE_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT\
+		0
+	#define SQ_LOCALINVALIDATE_FLAGS_SIGNAL_COMP 0x1UL
+	#define SQ_LOCALINVALIDATE_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+	#define SQ_LOCALINVALIDATE_FLAGS_UC_FENCE 0x4UL
+	#define SQ_LOCALINVALIDATE_FLAGS_SE 0x8UL
+	#define SQ_LOCALINVALIDATE_FLAGS_INLINE 0x10UL
+	#define SQ_LOCALINVALIDATE_FLAGS_WQE_TS_EN 0x20UL
+	#define SQ_LOCALINVALIDATE_FLAGS_DEBUG_TRACE 0x40UL
+	__le16 reserved16;
+	__le32 inv_l_key;
+	__le64 reserved64;
+	u8 reserved128[16];
+	__le32 data[24];
+};
+
+/* sq_localinvalidate_hdr (size:256b/32B) */
+struct sq_localinvalidate_hdr {
+	u8 wqe_type;
+	#define SQ_LOCALINVALIDATE_HDR_WQE_TYPE_LOCAL_INVALID 0xcUL
+	#define SQ_LOCALINVALIDATE_HDR_WQE_TYPE_LAST SQ_LOCALINVALIDATE_HDR_WQE_TYPE_LOCAL_INVALID
+	u8 flags;
+	#define \
+	SQ_LOCALINVALIDATE_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+	#define SQ_LOCALINVALIDATE_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT\
+		0
+	#define SQ_LOCALINVALIDATE_HDR_FLAGS_SIGNAL_COMP 0x1UL
+	#define SQ_LOCALINVALIDATE_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+	#define SQ_LOCALINVALIDATE_HDR_FLAGS_UC_FENCE 0x4UL
+	#define SQ_LOCALINVALIDATE_HDR_FLAGS_SE 0x8UL
+	#define SQ_LOCALINVALIDATE_HDR_FLAGS_INLINE 0x10UL
+	#define SQ_LOCALINVALIDATE_HDR_FLAGS_WQE_TS_EN 0x20UL
+	#define SQ_LOCALINVALIDATE_HDR_FLAGS_DEBUG_TRACE 0x40UL
+	__le16 reserved16;
+	__le32 inv_l_key;
+	__le64 reserved64;
+	u8 reserved128[16];
+};
+
+/* sq_fr_pmr (size:1024b/128B) */
+struct sq_fr_pmr {
+	u8 wqe_type;
+	#define SQ_FR_PMR_WQE_TYPE_FR_PMR 0xdUL
+	#define SQ_FR_PMR_WQE_TYPE_LAST SQ_FR_PMR_WQE_TYPE_FR_PMR
+	u8 flags;
+	#define SQ_FR_PMR_FLAGS_SIGNAL_COMP 0x1UL
+	#define SQ_FR_PMR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+	#define SQ_FR_PMR_FLAGS_UC_FENCE 0x4UL
+	#define SQ_FR_PMR_FLAGS_SE 0x8UL
+	#define SQ_FR_PMR_FLAGS_INLINE 0x10UL
+	#define SQ_FR_PMR_FLAGS_WQE_TS_EN 0x20UL
+	#define SQ_FR_PMR_FLAGS_DEBUG_TRACE 0x40UL
+	u8 access_cntl;
+	#define SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+	#define SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ 0x2UL
+	#define SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+	#define SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+	#define SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND
0x10UL + u8 zero_based_page_size_log; + #define SQ_FR_PMR_PAGE_SIZE_LOG_MASK 0x1fUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_SFT 0 + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4K 0x0UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8K 0x1UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_16K 0x2UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_32K 0x3UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_64K 0x4UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_128K 0x5UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_256K 0x6UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_512K 0x7UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1M 0x8UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_2M 0x9UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4M 0xaUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8M 0xbUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_16M 0xcUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_32M 0xdUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_64M 0xeUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_128M 0xfUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_256M 0x10UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_512M 0x11UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1G 0x12UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_2G 0x13UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4G 0x14UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8G 0x15UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_16G 0x16UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_32G 0x17UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_64G 0x18UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_128G 0x19UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_LAST SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PMR_ZERO_BASED 0x20UL + __le32 l_key; + u8 length[5]; + u8 reserved8_1; + u8 reserved8_2; + u8 numlevels_pbl_page_size_log; + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK 0x1fUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT 0 + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4K 0x0UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8K 0x1UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_16K 0x2UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_32K 0x3UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_64K 0x4UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_128K 0x5UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_256K 0x6UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_512K 0x7UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1M 0x8UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_2M 0x9UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4M 0xaUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8M 0xbUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_16M 0xcUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_32M 0xdUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_64M 0xeUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_128M 0xfUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_256M 0x10UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_512M 0x11UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1G 0x12UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_2G 0x13UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4G 0x14UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8G 0x15UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_16G 0x16UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_32G 0x17UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_64G 0x18UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_128G 0x19UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1T 
0x1cUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PMR_NUMLEVELS_MASK 0xc0UL + #define SQ_FR_PMR_NUMLEVELS_SFT 6 + #define SQ_FR_PMR_NUMLEVELS_PHYSICAL (0x0UL << 6) + #define SQ_FR_PMR_NUMLEVELS_LAYER1 (0x1UL << 6) + #define SQ_FR_PMR_NUMLEVELS_LAYER2 (0x2UL << 6) + #define SQ_FR_PMR_NUMLEVELS_LAST SQ_FR_PMR_NUMLEVELS_LAYER2 + __le64 pblptr; + __le64 va; + __le32 data[24]; +}; + +/* sq_fr_pmr_hdr (size:256b/32B) */ +struct sq_fr_pmr_hdr { + u8 wqe_type; + #define SQ_FR_PMR_HDR_WQE_TYPE_FR_PMR 0xdUL + #define SQ_FR_PMR_HDR_WQE_TYPE_LAST SQ_FR_PMR_HDR_WQE_TYPE_FR_PMR + u8 flags; + #define SQ_FR_PMR_HDR_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_FR_PMR_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_FR_PMR_HDR_FLAGS_UC_FENCE 0x4UL + #define SQ_FR_PMR_HDR_FLAGS_SE 0x8UL + #define SQ_FR_PMR_HDR_FLAGS_INLINE 0x10UL + #define SQ_FR_PMR_HDR_FLAGS_WQE_TS_EN 0x20UL + #define SQ_FR_PMR_HDR_FLAGS_DEBUG_TRACE 0x40UL + u8 access_cntl; + #define SQ_FR_PMR_HDR_ACCESS_CNTL_LOCAL_WRITE 0x1UL + #define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_READ 0x2UL + #define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_WRITE 0x4UL + #define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL + #define SQ_FR_PMR_HDR_ACCESS_CNTL_WINDOW_BIND 0x10UL + u8 zero_based_page_size_log; + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_MASK 0x1fUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_SFT 0 + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4K 0x0UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8K 0x1UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16K 0x2UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32K 0x3UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64K 0x4UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128K 0x5UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256K 0x6UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512K 0x7UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1M 0x8UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2M 0x9UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4M 0xaUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8M 0xbUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16M 0xcUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32M 0xdUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64M 0xeUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128M 0xfUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256M 0x10UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512M 0x11UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1G 0x12UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2G 0x13UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4G 0x14UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8G 0x15UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16G 0x16UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32G 0x17UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64G 0x18UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128G 0x19UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PMR_HDR_ZERO_BASED 0x20UL + __le32 l_key; + u8 length[5]; + u8 reserved8_1; + u8 reserved8_2; + u8 numlevels_pbl_page_size_log; + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_MASK 0x1fUL + #define 
+
+/* sq_fr_pmr_hdr (size:256b/32B) */
+struct sq_fr_pmr_hdr {
+	u8 wqe_type;
+	#define SQ_FR_PMR_HDR_WQE_TYPE_FR_PMR 0xdUL
+	#define SQ_FR_PMR_HDR_WQE_TYPE_LAST SQ_FR_PMR_HDR_WQE_TYPE_FR_PMR
+	u8 flags;
+	#define SQ_FR_PMR_HDR_FLAGS_SIGNAL_COMP 0x1UL
+	#define SQ_FR_PMR_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+	#define SQ_FR_PMR_HDR_FLAGS_UC_FENCE 0x4UL
+	#define SQ_FR_PMR_HDR_FLAGS_SE 0x8UL
+	#define SQ_FR_PMR_HDR_FLAGS_INLINE 0x10UL
+	#define SQ_FR_PMR_HDR_FLAGS_WQE_TS_EN 0x20UL
+	#define SQ_FR_PMR_HDR_FLAGS_DEBUG_TRACE 0x40UL
+	u8 access_cntl;
+	#define SQ_FR_PMR_HDR_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+	#define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_READ 0x2UL
+	#define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+	#define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+	#define SQ_FR_PMR_HDR_ACCESS_CNTL_WINDOW_BIND 0x10UL
+	u8 zero_based_page_size_log;
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_MASK 0x1fUL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_SFT 0
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4K 0x0UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8K 0x1UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16K 0x2UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32K 0x3UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64K 0x4UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128K 0x5UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256K 0x6UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512K 0x7UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1M 0x8UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2M 0x9UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4M 0xaUL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8M 0xbUL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16M 0xcUL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32M 0xdUL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64M 0xeUL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128M 0xfUL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256M 0x10UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512M 0x11UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1G 0x12UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2G 0x13UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4G 0x14UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8G 0x15UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16G 0x16UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32G 0x17UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64G 0x18UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128G 0x19UL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL
+	#define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8T
+	#define SQ_FR_PMR_HDR_ZERO_BASED 0x20UL
+	__le32 l_key;
+	u8 length[5];
+	u8 reserved8_1;
+	u8 reserved8_2;
+	u8 numlevels_pbl_page_size_log;
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_MASK 0x1fUL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_SFT 0
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4K 0x0UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8K 0x1UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16K 0x2UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32K 0x3UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64K 0x4UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128K 0x5UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256K 0x6UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512K 0x7UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1M 0x8UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2M 0x9UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4M 0xaUL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8M 0xbUL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16M 0xcUL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32M 0xdUL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64M 0xeUL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128M 0xfUL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256M 0x10UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512M 0x11UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1G 0x12UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2G 0x13UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4G 0x14UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8G 0x15UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16G 0x16UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32G 0x17UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64G 0x18UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128G 0x19UL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL
+	#define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8T
+	#define SQ_FR_PMR_HDR_NUMLEVELS_MASK 0xc0UL
+	#define SQ_FR_PMR_HDR_NUMLEVELS_SFT 6
+	#define SQ_FR_PMR_HDR_NUMLEVELS_PHYSICAL (0x0UL << 6)
+	#define SQ_FR_PMR_HDR_NUMLEVELS_LAYER1 (0x1UL << 6)
+	#define SQ_FR_PMR_HDR_NUMLEVELS_LAYER2 (0x2UL << 6)
+	#define SQ_FR_PMR_HDR_NUMLEVELS_LAST SQ_FR_PMR_HDR_NUMLEVELS_LAYER2
+	__le64 pblptr;
+	__le64 va;
+};
+
+/* sq_bind (size:1024b/128B) */
+struct sq_bind {
+	u8 wqe_type;
+	#define SQ_BIND_WQE_TYPE_BIND 0xeUL
+	#define SQ_BIND_WQE_TYPE_LAST SQ_BIND_WQE_TYPE_BIND
+	u8 flags;
+	#define SQ_BIND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+	#define SQ_BIND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+	#define SQ_BIND_FLAGS_SIGNAL_COMP 0x1UL
+	#define SQ_BIND_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+	#define SQ_BIND_FLAGS_UC_FENCE 0x4UL
+	#define SQ_BIND_FLAGS_SE 0x8UL
+	#define SQ_BIND_FLAGS_INLINE 0x10UL
+	#define SQ_BIND_FLAGS_WQE_TS_EN 0x20UL
+	#define SQ_BIND_FLAGS_DEBUG_TRACE 0x40UL
+	u8 access_cntl;
+	#define \
+	SQ_BIND_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_MASK\
+	0xffUL
+	#define \
+	SQ_BIND_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_SFT 0
+	#define SQ_BIND_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+	#define SQ_BIND_ACCESS_CNTL_REMOTE_READ 0x2UL
+	#define SQ_BIND_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+	#define SQ_BIND_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+	#define SQ_BIND_ACCESS_CNTL_WINDOW_BIND 0x10UL
+	u8 reserved8_1;
+	u8 mw_type_zero_based;
+	#define SQ_BIND_ZERO_BASED 0x1UL
+	#define SQ_BIND_MW_TYPE 0x2UL
+	#define SQ_BIND_MW_TYPE_TYPE1 (0x0UL << 1)
+	#define SQ_BIND_MW_TYPE_TYPE2 (0x1UL << 1)
+	#define SQ_BIND_MW_TYPE_LAST SQ_BIND_MW_TYPE_TYPE2
+	u8 reserved8_2;
+	__le16 reserved16;
+	__le32 parent_l_key;
+	__le32 l_key;
+	__le64 va;
+	u8 length[5];
+	u8 reserved24[3];
+	__le32 data[24];
+};
+
+/* sq_bind_hdr (size:256b/32B) */
+struct sq_bind_hdr {
+	u8 wqe_type;
+	#define SQ_BIND_HDR_WQE_TYPE_BIND 0xeUL
+	#define SQ_BIND_HDR_WQE_TYPE_LAST SQ_BIND_HDR_WQE_TYPE_BIND
+	u8 flags;
+	#define SQ_BIND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL
+	#define SQ_BIND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0
+	#define SQ_BIND_HDR_FLAGS_SIGNAL_COMP 0x1UL
+	#define SQ_BIND_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL
+	#define SQ_BIND_HDR_FLAGS_UC_FENCE 0x4UL
+	#define SQ_BIND_HDR_FLAGS_SE 0x8UL
+	#define SQ_BIND_HDR_FLAGS_INLINE 0x10UL
+	#define SQ_BIND_HDR_FLAGS_WQE_TS_EN 0x20UL
+	#define SQ_BIND_HDR_FLAGS_DEBUG_TRACE 0x40UL
+	u8 access_cntl;
+	#define \
+	SQ_BIND_HDR_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_MASK\
+	0xffUL
+	#define \
+	SQ_BIND_HDR_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_SFT \
+	0
+	#define SQ_BIND_HDR_ACCESS_CNTL_LOCAL_WRITE 0x1UL
+	#define SQ_BIND_HDR_ACCESS_CNTL_REMOTE_READ 0x2UL
+	#define SQ_BIND_HDR_ACCESS_CNTL_REMOTE_WRITE 0x4UL
+	#define SQ_BIND_HDR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL
+	#define SQ_BIND_HDR_ACCESS_CNTL_WINDOW_BIND 0x10UL
+	u8 reserved8_1;
+	u8 mw_type_zero_based;
+	#define SQ_BIND_HDR_ZERO_BASED 0x1UL
+	#define SQ_BIND_HDR_MW_TYPE 0x2UL
+	#define SQ_BIND_HDR_MW_TYPE_TYPE1 (0x0UL << 1)
+	#define SQ_BIND_HDR_MW_TYPE_TYPE2 (0x1UL << 1)
+	#define SQ_BIND_HDR_MW_TYPE_LAST SQ_BIND_HDR_MW_TYPE_TYPE2
+	u8 reserved8_2;
+	__le16 reserved16;
+	__le32 parent_l_key;
+	__le32 l_key;
+	__le64 va;
+	u8 length[5];
+	u8 reserved24[3];
+};
+
+/* rq_wqe (size:1024b/128B) */
+struct rq_wqe {
+	u8 wqe_type;
+	#define RQ_WQE_WQE_TYPE_RCV 0x80UL
+	#define RQ_WQE_WQE_TYPE_LAST RQ_WQE_WQE_TYPE_RCV
+	u8 flags;
+	u8 wqe_size;
+	u8 reserved8;
+	__le32 reserved32;
+	__le32 wr_id[2];
+	#define RQ_WQE_WR_ID_MASK 0xfffffUL
+	#define RQ_WQE_WR_ID_SFT 0
+	u8 reserved128[16];
+	__le32 data[24];
+};
+
+/* rq_wqe_hdr (size:256b/32B) */
+struct rq_wqe_hdr {
+	u8 wqe_type;
+	#define RQ_WQE_HDR_WQE_TYPE_RCV 0x80UL
+	#define RQ_WQE_HDR_WQE_TYPE_LAST RQ_WQE_HDR_WQE_TYPE_RCV
+	u8 flags;
+	u8 wqe_size;
+	u8 reserved8;
+	__le32 reserved32;
+	__le32 wr_id[2];
+	#define RQ_WQE_HDR_WR_ID_MASK 0xfffffUL
+	#define RQ_WQE_HDR_WR_ID_SFT 0
+	u8 reserved128[16];
+};
+
+/* cq_base (size:256b/32B) */
+struct cq_base {
+	__le64 reserved64_1;
+	__le64 reserved64_2;
+	__le64 reserved64_3;
+	u8 cqe_type_toggle;
+	#define CQ_BASE_TOGGLE 0x1UL
+	#define CQ_BASE_CQE_TYPE_MASK 0x1eUL
+	#define CQ_BASE_CQE_TYPE_SFT 1
+	#define CQ_BASE_CQE_TYPE_REQ (0x0UL << 1)
+	#define CQ_BASE_CQE_TYPE_RES_RC (0x1UL << 1)
+	#define CQ_BASE_CQE_TYPE_RES_UD (0x2UL << 1)
+	#define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1)
+	#define CQ_BASE_CQE_TYPE_RES_UD_CFA (0x4UL << 1)
+	#define CQ_BASE_CQE_TYPE_NO_OP (0xdUL << 1)
+	#define CQ_BASE_CQE_TYPE_TERMINAL (0xeUL << 1)
+	#define CQ_BASE_CQE_TYPE_CUT_OFF (0xfUL << 1)
+	#define CQ_BASE_CQE_TYPE_LAST CQ_BASE_CQE_TYPE_CUT_OFF
+	u8 status;
+	__le16 reserved16;
+	__le32 reserved32;
+};
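[Note: cq_base shows the pattern shared by every CQE variant that follows: a toggle bit plus a 4-bit CQE type packed into cqe_type_toggle. A consumer treats an entry as valid only when the toggle matches its current ring phase, then dispatches on the type. A hedged sketch of that check (not the driver's actual poll loop):]

#include <linux/bitfield.h>

static bool cqe_valid(const struct cq_base *cqe, u8 expected_toggle, u8 *type)
{
	u8 val = READ_ONCE(cqe->cqe_type_toggle);

	if ((val & CQ_BASE_TOGGLE) != expected_toggle)
		return false;

	dma_rmb();	/* read the rest of the CQE only after it is seen valid */
	*type = FIELD_GET(CQ_BASE_CQE_TYPE_MASK, val);
	return true;
}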
+
+/* cq_req (size:256b/32B) */
+struct cq_req {
+	__le64 qp_handle;
+	__le16 sq_cons_idx;
+	__le16 reserved16_1;
+	__le32 reserved32_2;
+	__le64 reserved64;
+	u8 cqe_type_toggle;
+	#define CQ_REQ_TOGGLE 0x1UL
+	#define CQ_REQ_CQE_TYPE_MASK 0x1eUL
+	#define CQ_REQ_CQE_TYPE_SFT 1
+	#define CQ_REQ_CQE_TYPE_REQ (0x0UL << 1)
+	#define CQ_REQ_CQE_TYPE_LAST CQ_REQ_CQE_TYPE_REQ
+	#define CQ_REQ_PUSH 0x20UL
+	u8 status;
+	#define CQ_REQ_STATUS_OK 0x0UL
+	#define CQ_REQ_STATUS_BAD_RESPONSE_ERR 0x1UL
+	#define CQ_REQ_STATUS_LOCAL_LENGTH_ERR 0x2UL
+	#define CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR 0x3UL
+	#define CQ_REQ_STATUS_LOCAL_PROTECTION_ERR 0x4UL
+	#define CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+	#define CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR 0x6UL
+	#define CQ_REQ_STATUS_REMOTE_ACCESS_ERR 0x7UL
+	#define CQ_REQ_STATUS_REMOTE_OPERATION_ERR 0x8UL
+	#define CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR 0x9UL
+	#define CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR 0xaUL
+	#define CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR 0xbUL
+	#define CQ_REQ_STATUS_LAST CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR
+	__le16 reserved16_2;
+	__le32 reserved32_1;
+};
+
+/* cq_res_rc (size:256b/32B) */
+struct cq_res_rc {
+	__le32 length;
+	__le32 imm_data_or_inv_r_key;
+	__le64 qp_handle;
+	__le64 mr_handle;
+	u8 cqe_type_toggle;
+	#define CQ_RES_RC_TOGGLE 0x1UL
+	#define CQ_RES_RC_CQE_TYPE_MASK 0x1eUL
+	#define CQ_RES_RC_CQE_TYPE_SFT 1
+	#define CQ_RES_RC_CQE_TYPE_RES_RC (0x1UL << 1)
+	#define CQ_RES_RC_CQE_TYPE_LAST CQ_RES_RC_CQE_TYPE_RES_RC
+	u8 status;
+	#define CQ_RES_RC_STATUS_OK 0x0UL
+	#define CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+	#define CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR 0x2UL
+	#define CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+	#define CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+	#define CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+	#define CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR 0x6UL
+	#define CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+	#define CQ_RES_RC_STATUS_HW_FLUSH_ERR 0x8UL
+	#define CQ_RES_RC_STATUS_LAST CQ_RES_RC_STATUS_HW_FLUSH_ERR
+	__le16 flags;
+	#define CQ_RES_RC_FLAGS_SRQ 0x1UL
+	#define CQ_RES_RC_FLAGS_SRQ_RQ 0x0UL
+	#define CQ_RES_RC_FLAGS_SRQ_SRQ 0x1UL
+	#define CQ_RES_RC_FLAGS_SRQ_LAST CQ_RES_RC_FLAGS_SRQ_SRQ
+	#define CQ_RES_RC_FLAGS_IMM 0x2UL
+	#define CQ_RES_RC_FLAGS_INV 0x4UL
+	#define CQ_RES_RC_FLAGS_RDMA 0x8UL
+	#define CQ_RES_RC_FLAGS_RDMA_SEND (0x0UL << 3)
+	#define CQ_RES_RC_FLAGS_RDMA_RDMA_WRITE (0x1UL << 3)
+	#define CQ_RES_RC_FLAGS_RDMA_LAST CQ_RES_RC_FLAGS_RDMA_RDMA_WRITE
+	__le32 srq_or_rq_wr_id;
+	#define CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+	#define CQ_RES_RC_SRQ_OR_RQ_WR_ID_SFT 0
+};
+
+/* cq_res_ud (size:256b/32B) */
+struct cq_res_ud {
+	__le16 length;
+	#define CQ_RES_UD_LENGTH_MASK 0x3fffUL
+	#define CQ_RES_UD_LENGTH_SFT 0
+	__le16 cfa_metadata;
+	#define CQ_RES_UD_CFA_METADATA_VID_MASK 0xfffUL
+	#define CQ_RES_UD_CFA_METADATA_VID_SFT 0
+	#define CQ_RES_UD_CFA_METADATA_DE 0x1000UL
+	#define CQ_RES_UD_CFA_METADATA_PRI_MASK 0xe000UL
+	#define CQ_RES_UD_CFA_METADATA_PRI_SFT 13
+	__le32 imm_data;
+	__le64 qp_handle;
+	__le16 src_mac[3];
+	__le16 src_qp_low;
+	u8 cqe_type_toggle;
+	#define CQ_RES_UD_TOGGLE 0x1UL
+	#define CQ_RES_UD_CQE_TYPE_MASK 0x1eUL
+	#define CQ_RES_UD_CQE_TYPE_SFT 1
+	#define CQ_RES_UD_CQE_TYPE_RES_UD (0x2UL << 1)
+	#define CQ_RES_UD_CQE_TYPE_LAST CQ_RES_UD_CQE_TYPE_RES_UD
+	u8 status;
+	#define CQ_RES_UD_STATUS_OK 0x0UL
+	#define CQ_RES_UD_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+	#define CQ_RES_UD_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+	#define CQ_RES_UD_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+	#define CQ_RES_UD_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+	#define CQ_RES_UD_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+	#define CQ_RES_UD_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+	#define CQ_RES_UD_STATUS_HW_FLUSH_ERR 0x8UL
+	#define CQ_RES_UD_STATUS_LAST CQ_RES_UD_STATUS_HW_FLUSH_ERR
+	__le16 flags;
+	#define CQ_RES_UD_FLAGS_SRQ 0x1UL
+	#define CQ_RES_UD_FLAGS_SRQ_RQ 0x0UL
+	#define CQ_RES_UD_FLAGS_SRQ_SRQ 0x1UL
+	#define CQ_RES_UD_FLAGS_SRQ_LAST CQ_RES_UD_FLAGS_SRQ_SRQ
+	#define CQ_RES_UD_FLAGS_IMM 0x2UL
+	#define CQ_RES_UD_FLAGS_UNUSED_MASK 0xcUL
+	#define CQ_RES_UD_FLAGS_UNUSED_SFT 2
+	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK 0x30UL
+	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT 4
+	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6
+	#define CQ_RES_UD_FLAGS_META_FORMAT_MASK 0x3c0UL
+	#define CQ_RES_UD_FLAGS_META_FORMAT_SFT 6
+	#define CQ_RES_UD_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+	#define CQ_RES_UD_FLAGS_META_FORMAT_VLAN (0x1UL << 6)
+	#define CQ_RES_UD_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+	#define CQ_RES_UD_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+	#define CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+	#define CQ_RES_UD_FLAGS_META_FORMAT_LAST CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET
+	#define CQ_RES_UD_FLAGS_EXT_META_FORMAT_MASK 0xc00UL
+	#define CQ_RES_UD_FLAGS_EXT_META_FORMAT_SFT 10
+	__le32 src_qp_high_srq_or_rq_wr_id;
+	#define CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+	#define CQ_RES_UD_SRQ_OR_RQ_WR_ID_SFT 0
+	#define CQ_RES_UD_SRC_QP_HIGH_MASK 0xff000000UL
+	#define CQ_RES_UD_SRC_QP_HIGH_SFT 24
+};
+
+/* cq_res_ud_v2 (size:256b/32B) */
+struct cq_res_ud_v2 {
+	__le16 length;
+	#define CQ_RES_UD_V2_LENGTH_MASK 0x3fffUL
+	#define CQ_RES_UD_V2_LENGTH_SFT 0
+	__le16 cfa_metadata0;
+	#define CQ_RES_UD_V2_CFA_METADATA0_VID_MASK 0xfffUL
+	#define CQ_RES_UD_V2_CFA_METADATA0_VID_SFT 0
+	#define CQ_RES_UD_V2_CFA_METADATA0_DE 0x1000UL
+	#define CQ_RES_UD_V2_CFA_METADATA0_PRI_MASK 0xe000UL
+	#define CQ_RES_UD_V2_CFA_METADATA0_PRI_SFT 13
+	__le32 imm_data;
+	__le64 qp_handle;
+	__le16 src_mac[3];
+	__le16 src_qp_low;
+	u8 cqe_type_toggle;
+	#define CQ_RES_UD_V2_TOGGLE 0x1UL
+	#define CQ_RES_UD_V2_CQE_TYPE_MASK 0x1eUL
+	#define CQ_RES_UD_V2_CQE_TYPE_SFT 1
+	#define CQ_RES_UD_V2_CQE_TYPE_RES_UD (0x2UL << 1)
+	#define CQ_RES_UD_V2_CQE_TYPE_LAST CQ_RES_UD_V2_CQE_TYPE_RES_UD
+	u8 status;
+	#define CQ_RES_UD_V2_STATUS_OK 0x0UL
+	#define CQ_RES_UD_V2_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+	#define CQ_RES_UD_V2_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+	#define CQ_RES_UD_V2_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+	#define CQ_RES_UD_V2_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+	#define CQ_RES_UD_V2_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+	#define CQ_RES_UD_V2_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+	#define CQ_RES_UD_V2_STATUS_HW_FLUSH_ERR 0x8UL
+	#define CQ_RES_UD_V2_STATUS_LAST CQ_RES_UD_V2_STATUS_HW_FLUSH_ERR
+	__le16 flags;
+	#define CQ_RES_UD_V2_FLAGS_SRQ 0x1UL
+	#define CQ_RES_UD_V2_FLAGS_SRQ_RQ 0x0UL
+	#define CQ_RES_UD_V2_FLAGS_SRQ_SRQ 0x1UL
+	#define CQ_RES_UD_V2_FLAGS_SRQ_LAST CQ_RES_UD_V2_FLAGS_SRQ_SRQ
+	#define CQ_RES_UD_V2_FLAGS_IMM 0x2UL
+	#define CQ_RES_UD_V2_FLAGS_UNUSED_MASK 0xcUL
+	#define CQ_RES_UD_V2_FLAGS_UNUSED_SFT 2
+	#define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_MASK 0x30UL
+	#define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_SFT 4
+	#define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+	#define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+	#define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+	#define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V2IPV6
+	#define CQ_RES_UD_V2_FLAGS_META_FORMAT_MASK 0x3c0UL
+	#define CQ_RES_UD_V2_FLAGS_META_FORMAT_SFT 6
+	#define CQ_RES_UD_V2_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+	#define CQ_RES_UD_V2_FLAGS_META_FORMAT_ACT_REC_PTR (0x1UL << 6)
+	#define CQ_RES_UD_V2_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+	#define CQ_RES_UD_V2_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+	#define CQ_RES_UD_V2_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+	#define CQ_RES_UD_V2_FLAGS_META_FORMAT_LAST CQ_RES_UD_V2_FLAGS_META_FORMAT_HDR_OFFSET
+	__le32 src_qp_high_srq_or_rq_wr_id;
+	#define CQ_RES_UD_V2_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+	#define CQ_RES_UD_V2_SRQ_OR_RQ_WR_ID_SFT 0
+	#define CQ_RES_UD_V2_CFA_METADATA1_MASK 0xf00000UL
+	#define CQ_RES_UD_V2_CFA_METADATA1_SFT 20
+	#define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_MASK 0x700000UL
+	#define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_SFT 20
+	#define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID88A8 (0x0UL << 20)
+	#define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID8100 (0x1UL << 20)
+	#define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID9100 (0x2UL << 20)
+	#define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID9200 (0x3UL << 20)
+	#define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID9300 (0x4UL << 20)
+	#define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPIDCFG (0x5UL << 20)
+	#define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_LAST CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPIDCFG
+	#define CQ_RES_UD_V2_CFA_METADATA1_VALID 0x800000UL
+	#define CQ_RES_UD_V2_SRC_QP_HIGH_MASK 0xff000000UL
+	#define CQ_RES_UD_V2_SRC_QP_HIGH_SFT 24
+};
+
+/* cq_res_ud_cfa (size:256b/32B) */
+struct cq_res_ud_cfa {
+	__le16 length;
+	#define CQ_RES_UD_CFA_LENGTH_MASK 0x3fffUL
+	#define CQ_RES_UD_CFA_LENGTH_SFT 0
+	__le16 cfa_code;
+	__le32 imm_data;
+	__le32 qid;
+	#define CQ_RES_UD_CFA_QID_MASK 0xfffffUL
+	#define CQ_RES_UD_CFA_QID_SFT 0
+	__le32 cfa_metadata;
+	#define CQ_RES_UD_CFA_CFA_METADATA_VID_MASK 0xfffUL
+	#define CQ_RES_UD_CFA_CFA_METADATA_VID_SFT 0
+	#define CQ_RES_UD_CFA_CFA_METADATA_DE 0x1000UL
+	#define CQ_RES_UD_CFA_CFA_METADATA_PRI_MASK 0xe000UL
+	#define CQ_RES_UD_CFA_CFA_METADATA_PRI_SFT 13
+	#define CQ_RES_UD_CFA_CFA_METADATA_TPID_MASK 0xffff0000UL
+	#define CQ_RES_UD_CFA_CFA_METADATA_TPID_SFT 16
+	__le16 src_mac[3];
+	__le16 src_qp_low;
+	u8 cqe_type_toggle;
+	#define CQ_RES_UD_CFA_TOGGLE 0x1UL
+	#define CQ_RES_UD_CFA_CQE_TYPE_MASK 0x1eUL
+	#define CQ_RES_UD_CFA_CQE_TYPE_SFT 1
+	#define CQ_RES_UD_CFA_CQE_TYPE_RES_UD_CFA (0x4UL << 1)
+	#define CQ_RES_UD_CFA_CQE_TYPE_LAST CQ_RES_UD_CFA_CQE_TYPE_RES_UD_CFA
+	u8 status;
+	#define CQ_RES_UD_CFA_STATUS_OK 0x0UL
+	#define CQ_RES_UD_CFA_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+	#define CQ_RES_UD_CFA_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+	#define CQ_RES_UD_CFA_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+	#define CQ_RES_UD_CFA_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+	#define CQ_RES_UD_CFA_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+	#define CQ_RES_UD_CFA_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+	#define CQ_RES_UD_CFA_STATUS_HW_FLUSH_ERR 0x8UL
+	#define CQ_RES_UD_CFA_STATUS_LAST CQ_RES_UD_CFA_STATUS_HW_FLUSH_ERR
+	__le16 flags;
+	#define CQ_RES_UD_CFA_FLAGS_SRQ 0x1UL
+	#define CQ_RES_UD_CFA_FLAGS_SRQ_RQ 0x0UL
+	#define CQ_RES_UD_CFA_FLAGS_SRQ_SRQ 0x1UL
+	#define CQ_RES_UD_CFA_FLAGS_SRQ_LAST CQ_RES_UD_CFA_FLAGS_SRQ_SRQ
+	#define CQ_RES_UD_CFA_FLAGS_IMM 0x2UL
+	#define CQ_RES_UD_CFA_FLAGS_UNUSED_MASK 0xcUL
+	#define CQ_RES_UD_CFA_FLAGS_UNUSED_SFT 2
+	#define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_MASK 0x30UL
+	#define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_SFT 4
+	#define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+	#define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+	#define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+	#define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V2IPV6
+	#define CQ_RES_UD_CFA_FLAGS_META_FORMAT_MASK 0x3c0UL
+	#define CQ_RES_UD_CFA_FLAGS_META_FORMAT_SFT 6
+	#define CQ_RES_UD_CFA_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+	#define CQ_RES_UD_CFA_FLAGS_META_FORMAT_VLAN (0x1UL << 6)
+	#define CQ_RES_UD_CFA_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+	#define CQ_RES_UD_CFA_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+	#define CQ_RES_UD_CFA_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+	#define CQ_RES_UD_CFA_FLAGS_META_FORMAT_LAST CQ_RES_UD_CFA_FLAGS_META_FORMAT_HDR_OFFSET
+	#define CQ_RES_UD_CFA_FLAGS_EXT_META_FORMAT_MASK 0xc00UL
+	#define CQ_RES_UD_CFA_FLAGS_EXT_META_FORMAT_SFT 10
+	__le32 src_qp_high_srq_or_rq_wr_id;
+	#define CQ_RES_UD_CFA_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+	#define CQ_RES_UD_CFA_SRQ_OR_RQ_WR_ID_SFT 0
+	#define CQ_RES_UD_CFA_SRC_QP_HIGH_MASK 0xff000000UL
+	#define CQ_RES_UD_CFA_SRC_QP_HIGH_SFT 24
+};
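[Note: in the UD completions above, the 24-bit source QP number is split: the low 16 bits sit in src_qp_low and bits 23:16 in the top byte of src_qp_high_srq_or_rq_wr_id. A sketch of the recombination, illustrative rather than lifted from the driver:]

#include <linux/bitfield.h>

static u32 cq_res_ud_src_qp(const struct cq_res_ud *cqe)
{
	u32 hi = FIELD_GET(CQ_RES_UD_SRC_QP_HIGH_MASK,
			   le32_to_cpu(cqe->src_qp_high_srq_or_rq_wr_id));

	return (hi << 16) | le16_to_cpu(cqe->src_qp_low);
}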
+
+/* cq_res_ud_cfa_v2 (size:256b/32B) */
+struct cq_res_ud_cfa_v2 {
+	__le16 length;
+	#define CQ_RES_UD_CFA_V2_LENGTH_MASK 0x3fffUL
+	#define CQ_RES_UD_CFA_V2_LENGTH_SFT 0
+	__le16 cfa_metadata0;
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA0_VID_MASK 0xfffUL
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA0_VID_SFT 0
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA0_DE 0x1000UL
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA0_PRI_MASK 0xe000UL
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA0_PRI_SFT 13
+	__le32 imm_data;
+	__le32 qid;
+	#define CQ_RES_UD_CFA_V2_QID_MASK 0xfffffUL
+	#define CQ_RES_UD_CFA_V2_QID_SFT 0
+	__le32 cfa_metadata2;
+	__le16 src_mac[3];
+	__le16 src_qp_low;
+	u8 cqe_type_toggle;
+	#define CQ_RES_UD_CFA_V2_TOGGLE 0x1UL
+	#define CQ_RES_UD_CFA_V2_CQE_TYPE_MASK 0x1eUL
+	#define CQ_RES_UD_CFA_V2_CQE_TYPE_SFT 1
+	#define CQ_RES_UD_CFA_V2_CQE_TYPE_RES_UD_CFA (0x4UL << 1)
+	#define CQ_RES_UD_CFA_V2_CQE_TYPE_LAST CQ_RES_UD_CFA_V2_CQE_TYPE_RES_UD_CFA
+	u8 status;
+	#define CQ_RES_UD_CFA_V2_STATUS_OK 0x0UL
+	#define CQ_RES_UD_CFA_V2_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+	#define CQ_RES_UD_CFA_V2_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+	#define CQ_RES_UD_CFA_V2_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+	#define CQ_RES_UD_CFA_V2_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+	#define CQ_RES_UD_CFA_V2_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+	#define CQ_RES_UD_CFA_V2_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+	#define CQ_RES_UD_CFA_V2_STATUS_HW_FLUSH_ERR 0x8UL
+	#define CQ_RES_UD_CFA_V2_STATUS_LAST CQ_RES_UD_CFA_V2_STATUS_HW_FLUSH_ERR
+	__le16 flags;
+	#define CQ_RES_UD_CFA_V2_FLAGS_SRQ 0x1UL
+	#define CQ_RES_UD_CFA_V2_FLAGS_SRQ_RQ 0x0UL
+	#define CQ_RES_UD_CFA_V2_FLAGS_SRQ_SRQ 0x1UL
+	#define CQ_RES_UD_CFA_V2_FLAGS_SRQ_LAST CQ_RES_UD_CFA_V2_FLAGS_SRQ_SRQ
+	#define CQ_RES_UD_CFA_V2_FLAGS_IMM 0x2UL
+	#define CQ_RES_UD_CFA_V2_FLAGS_UNUSED_MASK 0xcUL
+	#define CQ_RES_UD_CFA_V2_FLAGS_UNUSED_SFT 2
+	#define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_MASK 0x30UL
+	#define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_SFT 4
+	#define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+	#define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+	#define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
+	#define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V2IPV6
+	#define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_MASK 0x3c0UL
+	#define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_SFT 6
+	#define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+	#define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_ACT_REC_PTR (0x1UL << 6)
+	#define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+	#define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+	#define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+	#define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_LAST \
+		CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_HDR_OFFSET
+	__le32 src_qp_high_srq_or_rq_wr_id;
+	#define CQ_RES_UD_CFA_V2_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+	#define CQ_RES_UD_CFA_V2_SRQ_OR_RQ_WR_ID_SFT 0
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA1_MASK 0xf00000UL
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA1_SFT 20
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_MASK 0x700000UL
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_SFT 20
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID88A8 (0x0UL << 20)
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID8100 (0x1UL << 20)
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID9100 (0x2UL << 20)
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID9200 (0x3UL << 20)
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID9300 (0x4UL << 20)
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPIDCFG (0x5UL << 20)
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_LAST \
+		CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPIDCFG
+	#define CQ_RES_UD_CFA_V2_CFA_METADATA1_VALID 0x800000UL
+	#define CQ_RES_UD_CFA_V2_SRC_QP_HIGH_MASK 0xff000000UL
+	#define CQ_RES_UD_CFA_V2_SRC_QP_HIGH_SFT 24
+};
+
+/* cq_res_raweth_qp1 (size:256b/32B) */
+struct cq_res_raweth_qp1 {
+	__le16 length;
+	#define CQ_RES_RAWETH_QP1_LENGTH_MASK 0x3fffUL
+	#define CQ_RES_RAWETH_QP1_LENGTH_SFT 0
+	__le16 raweth_qp1_flags;
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_MASK 0x3ffUL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_SFT 0
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ERROR 0x1UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_MASK 0x3c0UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_SFT 6
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_NOT_KNOWN (0x0UL << 6)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_IP (0x1UL << 6)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_TCP (0x2UL << 6)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_UDP (0x3UL << 6)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_FCOE (0x4UL << 6)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE (0x5UL << 6)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ICMP (0x7UL << 6)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_WO_TIMESTAMP (0x8UL << 6)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP (0x9UL << 6)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_LAST \
+		CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP
+	__le16 raweth_qp1_errors;
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_IP_CS_ERROR 0x10UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_L4_CS_ERROR 0x20UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_IP_CS_ERROR 0x40UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_L4_CS_ERROR 0x80UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_CRC_ERROR 0x100UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_MASK 0xe00UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_SFT 9
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_NO_ERROR (0x0UL << 9)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1UL << 9)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2UL << 9)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3UL << 9)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4UL << 9)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5UL << 9)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6UL << 9)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_LAST \
+		CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_MASK 0xf000UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_SFT 12
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_NO_ERROR (0x0UL << 12)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1UL << 12)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2UL << 12)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3UL << 12)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4UL << 12)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5UL << 12)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6UL << 12)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7UL << 12)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8UL << 12)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_LAST \
+		CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
+	__le16 raweth_qp1_cfa_code;
+	__le64 qp_handle;
+	__le32 raweth_qp1_flags2;
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC 0x1UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC 0x2UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_T_IP_CS_CALC 0x4UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_T_L4_CS_CALC 0x8UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_MASK 0xf0UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_SFT 4
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_NONE (0x0UL << 4)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN (0x1UL << 4)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_TUNNEL_ID (0x2UL << 4)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_CHDR_DATA (0x3UL << 4)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET (0x4UL << 4)
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_LAST \
+		CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE 0x100UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_CALC 0x200UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_EXT_META_FORMAT_MASK 0xc00UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_EXT_META_FORMAT_SFT 10
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_MASK 0xffff0000UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_SFT 16
+	__le32 raweth_qp1_metadata;
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_DE_VID_MASK 0xffffUL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_DE_VID_SFT 0
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK 0xfffUL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_SFT 0
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_DE 0x1000UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK 0xe000UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT 13
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK 0xffff0000UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT 16
+	u8 cqe_type_toggle;
+	#define CQ_RES_RAWETH_QP1_TOGGLE 0x1UL
+	#define CQ_RES_RAWETH_QP1_CQE_TYPE_MASK 0x1eUL
+	#define CQ_RES_RAWETH_QP1_CQE_TYPE_SFT 1
+	#define CQ_RES_RAWETH_QP1_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1)
+	#define CQ_RES_RAWETH_QP1_CQE_TYPE_LAST CQ_RES_RAWETH_QP1_CQE_TYPE_RES_RAWETH_QP1
+	u8 status;
+	#define CQ_RES_RAWETH_QP1_STATUS_OK 0x0UL
+	#define CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+	#define CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+	#define CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+	#define CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+	#define CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+	#define CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+	#define CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR 0x8UL
+	#define CQ_RES_RAWETH_QP1_STATUS_LAST CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR
+	__le16 flags;
+	#define CQ_RES_RAWETH_QP1_FLAGS_SRQ 0x1UL
+	#define CQ_RES_RAWETH_QP1_FLAGS_SRQ_RQ 0x0UL
+	#define CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ 0x1UL
+	#define CQ_RES_RAWETH_QP1_FLAGS_SRQ_LAST CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ
+	__le32 raweth_qp1_payload_offset_srq_or_rq_wr_id;
+	#define CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+	#define CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_SFT 0
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_PAYLOAD_OFFSET_MASK 0xff000000UL
+	#define CQ_RES_RAWETH_QP1_RAWETH_QP1_PAYLOAD_OFFSET_SFT 24
+};
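[Note: for raw-ethernet QP1 completions, checksum validity is a two-part test: the *_CS_CALC bits in raweth_qp1_flags2 say the hardware computed a checksum, and the *_CS_ERROR bits in raweth_qp1_errors say whether it failed. A hedged sketch based on the bit definitions above, not the driver's exact code:]

static bool raweth_l4_csum_ok(const struct cq_res_raweth_qp1 *cqe)
{
	u32 flags2 = le32_to_cpu(cqe->raweth_qp1_flags2);
	u16 errors = le16_to_cpu(cqe->raweth_qp1_errors);

	return (flags2 & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) &&
	       !(errors & CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_L4_CS_ERROR);
}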
+
+/* cq_res_raweth_qp1_v2 (size:256b/32B) */
+struct cq_res_raweth_qp1_v2 {
+	__le16 length;
+	#define CQ_RES_RAWETH_QP1_V2_LENGTH_MASK 0x3fffUL
+	#define CQ_RES_RAWETH_QP1_V2_LENGTH_SFT 0
+	__le16 raweth_qp1_flags;
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_MASK 0x3ffUL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_SFT 0
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ERROR 0x1UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_MASK 0x3c0UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_SFT 6
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_NOT_KNOWN (0x0UL << 6)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_IP (0x1UL << 6)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_TCP (0x2UL << 6)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_UDP (0x3UL << 6)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_FCOE (0x4UL << 6)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_ROCE (0x5UL << 6)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_ICMP (0x7UL << 6)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_PTP_WO_TIMESTAMP (0x8UL << 6)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP (0x9UL << 6)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_LAST \
+		CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP
+	__le16 raweth_qp1_errors;
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_IP_CS_ERROR 0x10UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_L4_CS_ERROR 0x20UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_IP_CS_ERROR 0x40UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_L4_CS_ERROR 0x80UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_CRC_ERROR 0x100UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_MASK 0xe00UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_SFT 9
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_NO_ERROR (0x0UL << 9)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1UL << 9)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2UL << 9)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3UL << 9)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4UL << 9)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5UL << 9)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6UL << 9)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_LAST \
+		CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_MASK 0xf000UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_SFT 12
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_NO_ERROR (0x0UL << 12)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1UL << 12)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2UL << 12)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3UL << 12)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4UL << 12)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5UL << 12)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6UL << 12)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \
+		(0x7UL << 12)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \
+		(0x8UL << 12)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_LAST \
+		CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
+	__le16 cfa_metadata0;
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_VID_MASK 0xfffUL
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_VID_SFT 0
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_DE 0x1000UL
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_PRI_MASK 0xe000UL
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_PRI_SFT 13
+	__le64 qp_handle;
+	__le32 raweth_qp1_flags2;
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_CS_ALL_OK_MODE 0x8UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_MASK 0xf0UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_SFT 4
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_NONE (0x0UL << 4)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_ACT_REC_PTR (0x1UL << 4)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_TUNNEL_ID (0x2UL << 4)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_CHDR_DATA (0x3UL << 4)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET (0x4UL << 4)
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_LAST \
+		CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_IP_TYPE 0x100UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_CALC 0x200UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_CS_OK_MASK 0xfc00UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_CS_OK_SFT 10
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_MASK 0xffff0000UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_SFT 16
+	__le32 cfa_metadata2;
+	u8 cqe_type_toggle;
+	#define CQ_RES_RAWETH_QP1_V2_TOGGLE 0x1UL
+	#define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_MASK 0x1eUL
+	#define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_SFT 1
+	#define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1)
+	#define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_LAST CQ_RES_RAWETH_QP1_V2_CQE_TYPE_RES_RAWETH_QP1
+	u8 status;
+	#define CQ_RES_RAWETH_QP1_V2_STATUS_OK 0x0UL
+	#define CQ_RES_RAWETH_QP1_V2_STATUS_LOCAL_ACCESS_ERROR 0x1UL
+	#define CQ_RES_RAWETH_QP1_V2_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL
+	#define CQ_RES_RAWETH_QP1_V2_STATUS_LOCAL_PROTECTION_ERR 0x3UL
+	#define CQ_RES_RAWETH_QP1_V2_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+	#define CQ_RES_RAWETH_QP1_V2_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL
+	#define CQ_RES_RAWETH_QP1_V2_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL
+	#define CQ_RES_RAWETH_QP1_V2_STATUS_HW_FLUSH_ERR 0x8UL
+	#define CQ_RES_RAWETH_QP1_V2_STATUS_LAST CQ_RES_RAWETH_QP1_V2_STATUS_HW_FLUSH_ERR
+	__le16 flags;
+	#define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ 0x1UL
+	#define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_RQ 0x0UL
+	#define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_SRQ 0x1UL
+	#define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_LAST CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_SRQ
+	__le32 raweth_qp1_payload_offset_srq_or_rq_wr_id;
+	#define CQ_RES_RAWETH_QP1_V2_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
+	#define CQ_RES_RAWETH_QP1_V2_SRQ_OR_RQ_WR_ID_SFT 0
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_MASK 0xf00000UL
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_SFT 20
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_MASK 0x700000UL
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_SFT 20
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID88A8 (0x0UL << 20)
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID8100 (0x1UL << 20)
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID9100 (0x2UL << 20)
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID9200 (0x3UL << 20)
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID9300 (0x4UL << 20)
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPIDCFG (0x5UL << 20)
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_LAST \
+		CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPIDCFG
+	#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_VALID 0x800000UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_PAYLOAD_OFFSET_MASK 0xff000000UL
+	#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_PAYLOAD_OFFSET_SFT 24
+};
+
+/* cq_terminal (size:256b/32B) */
+struct cq_terminal {
+	__le64 qp_handle;
+	__le16 sq_cons_idx;
+	__le16 rq_cons_idx;
+	__le32 reserved32_1;
+	__le64 reserved64_3;
+	u8 cqe_type_toggle;
+	#define CQ_TERMINAL_TOGGLE 0x1UL
+	#define CQ_TERMINAL_CQE_TYPE_MASK 0x1eUL
+	#define CQ_TERMINAL_CQE_TYPE_SFT 1
+	#define CQ_TERMINAL_CQE_TYPE_TERMINAL (0xeUL << 1)
+	#define CQ_TERMINAL_CQE_TYPE_LAST CQ_TERMINAL_CQE_TYPE_TERMINAL
+	u8 status;
+	#define CQ_TERMINAL_STATUS_OK 0x0UL
+	#define CQ_TERMINAL_STATUS_LAST CQ_TERMINAL_STATUS_OK
+	__le16 reserved16;
+	__le32 reserved32_2;
+};
+
+/* cq_cutoff (size:256b/32B) */
+struct cq_cutoff {
+	__le64 reserved64_1;
+	__le64 reserved64_2;
+	__le64 reserved64_3;
+	u8 cqe_type_toggle;
+	#define CQ_CUTOFF_TOGGLE 0x1UL
+	#define CQ_CUTOFF_CQE_TYPE_MASK 0x1eUL
+	#define CQ_CUTOFF_CQE_TYPE_SFT 1
+	#define CQ_CUTOFF_CQE_TYPE_CUT_OFF (0xfUL << 1)
+	#define CQ_CUTOFF_CQE_TYPE_LAST CQ_CUTOFF_CQE_TYPE_CUT_OFF
+	u8 status;
+	#define CQ_CUTOFF_STATUS_OK 0x0UL
+	#define CQ_CUTOFF_STATUS_LAST CQ_CUTOFF_STATUS_OK
+	__le16 reserved16;
+	__le32 reserved32;
+};
+
+/* nq_base (size:128b/16B) */
+struct nq_base {
+	__le16 info10_type;
+	#define NQ_BASE_TYPE_MASK 0x3fUL
+	#define NQ_BASE_TYPE_SFT 0
+	#define NQ_BASE_TYPE_CQ_NOTIFICATION 0x30UL
+	#define NQ_BASE_TYPE_SRQ_EVENT 0x32UL
+	#define NQ_BASE_TYPE_DBQ_EVENT 0x34UL
+	#define NQ_BASE_TYPE_QP_EVENT 0x38UL
+	#define NQ_BASE_TYPE_FUNC_EVENT 0x3aUL
+	#define NQ_BASE_TYPE_LAST NQ_BASE_TYPE_FUNC_EVENT
+	#define NQ_BASE_INFO10_MASK 0xffc0UL
+	#define NQ_BASE_INFO10_SFT 6
+	__le16 info16;
+	__le32 info32;
+	__le32 info63_v[2];
+	#define NQ_BASE_V 0x1UL
+	#define NQ_BASE_INFO63_MASK 0xfffffffeUL
+	#define NQ_BASE_INFO63_SFT 1
+};
+
+/* nq_cn (size:128b/16B) */
+struct nq_cn {
+	__le16 type;
+	#define NQ_CN_TYPE_MASK 0x3fUL
+	#define NQ_CN_TYPE_SFT 0
+	#define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL
+	#define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION
+	#define NQ_CN_TOGGLE_MASK 0xc0UL
+	#define NQ_CN_TOGGLE_SFT 6
+	__le16 reserved16;
+	__le32 cq_handle_low;
+	__le32 v;
+	#define NQ_CN_V 0x1UL
+	__le32 cq_handle_high;
+};
+
+/* nq_srq_event (size:128b/16B) */
+struct nq_srq_event {
+	u8 type;
+	#define NQ_SRQ_EVENT_TYPE_MASK 0x3fUL
+	#define NQ_SRQ_EVENT_TYPE_SFT 0
+	#define NQ_SRQ_EVENT_TYPE_SRQ_EVENT 0x32UL
+	#define NQ_SRQ_EVENT_TYPE_LAST NQ_SRQ_EVENT_TYPE_SRQ_EVENT
+	u8 event;
+	#define NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT 0x1UL
+	#define NQ_SRQ_EVENT_EVENT_LAST NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT
+	__le16 reserved16;
+	__le32 srq_handle_low;
+	__le32 v;
+	#define NQ_SRQ_EVENT_V 0x1UL
+	__le32 srq_handle_high;
+};
+
+/* nq_dbq_event (size:128b/16B) */
+struct nq_dbq_event {
+	u8 type;
+	#define NQ_DBQ_EVENT_TYPE_MASK 0x3fUL
+	#define NQ_DBQ_EVENT_TYPE_SFT 0
+	#define NQ_DBQ_EVENT_TYPE_DBQ_EVENT 0x34UL
+	#define NQ_DBQ_EVENT_TYPE_LAST NQ_DBQ_EVENT_TYPE_DBQ_EVENT
+	u8 event;
+	#define NQ_DBQ_EVENT_EVENT_DBQ_THRESHOLD_EVENT 0x1UL
+	#define NQ_DBQ_EVENT_EVENT_LAST NQ_DBQ_EVENT_EVENT_DBQ_THRESHOLD_EVENT
+	__le16 db_pfid;
+	#define NQ_DBQ_EVENT_DB_PFID_MASK 0xfUL
+	#define NQ_DBQ_EVENT_DB_PFID_SFT 0
+	__le32 db_dpi;
+	#define NQ_DBQ_EVENT_DB_DPI_MASK 0xfffffUL
+	#define NQ_DBQ_EVENT_DB_DPI_SFT 0
+	__le32 v;
+	#define NQ_DBQ_EVENT_V 0x1UL
+	__le32 db_type_db_xid;
+	#define NQ_DBQ_EVENT_DB_XID_MASK 0xfffffUL
+	#define NQ_DBQ_EVENT_DB_XID_SFT 0
+	#define NQ_DBQ_EVENT_DB_TYPE_MASK 0xf0000000UL
+	#define NQ_DBQ_EVENT_DB_TYPE_SFT 28
+};
+
+/* xrrq_irrq (size:256b/32B) */
+struct xrrq_irrq {
+	__le16 credits_type;
+	#define XRRQ_IRRQ_TYPE 0x1UL
+	#define XRRQ_IRRQ_TYPE_READ_REQ 0x0UL
+	#define XRRQ_IRRQ_TYPE_ATOMIC_REQ 0x1UL
+	#define XRRQ_IRRQ_TYPE_LAST XRRQ_IRRQ_TYPE_ATOMIC_REQ
+	#define XRRQ_IRRQ_CREDITS_MASK 0xf800UL
+	#define XRRQ_IRRQ_CREDITS_SFT 11
+	__le16 reserved16;
+	__le32 reserved32;
+	__le32 psn;
+	#define XRRQ_IRRQ_PSN_MASK 0xffffffUL
+	#define XRRQ_IRRQ_PSN_SFT 0
+	__le32 msn;
+	#define XRRQ_IRRQ_MSN_MASK 0xffffffUL
+	#define XRRQ_IRRQ_MSN_SFT 0
+	__le64 va_or_atomic_result;
+	__le32 rdma_r_key;
+	__le32 length;
+};
+
+/* xrrq_orrq (size:256b/32B) */
+struct xrrq_orrq {
+	__le16 num_sges_type;
+	#define XRRQ_ORRQ_TYPE 0x1UL
+	#define XRRQ_ORRQ_TYPE_READ_REQ 0x0UL
+	#define XRRQ_ORRQ_TYPE_ATOMIC_REQ 0x1UL
+	#define XRRQ_ORRQ_TYPE_LAST XRRQ_ORRQ_TYPE_ATOMIC_REQ
+	#define XRRQ_ORRQ_NUM_SGES_MASK 0xf800UL
+	#define XRRQ_ORRQ_NUM_SGES_SFT 11
+	__le16 reserved16;
+	__le32 length;
+	__le32 psn;
+	#define XRRQ_ORRQ_PSN_MASK 0xffffffUL
+	#define XRRQ_ORRQ_PSN_SFT 0
+	__le32 end_psn;
+	#define XRRQ_ORRQ_END_PSN_MASK 0xffffffUL
+	#define XRRQ_ORRQ_END_PSN_SFT 0
+	__le64 first_sge_phy_or_sing_sge_va;
+	__le32 single_sge_l_key;
+	__le32 single_sge_size;
+};
+
+/* ptu_pte (size:64b/8B) */
+struct ptu_pte {
+	__le32 page_next_to_last_last_valid[2];
+	#define PTU_PTE_VALID 0x1UL
+	#define PTU_PTE_LAST 0x2UL
+	#define PTU_PTE_NEXT_TO_LAST 0x4UL
+	#define PTU_PTE_UNUSED_MASK 0xff8UL
+	#define PTU_PTE_UNUSED_SFT 3
+	#define PTU_PTE_PAGE_MASK 0xfffff000UL
+	#define PTU_PTE_PAGE_SFT 12
+};
+
+/* ptu_pde (size:64b/8B) */
+struct ptu_pde {
+	__le32 page_valid[2];
+	#define PTU_PDE_VALID 0x1UL
+	#define PTU_PDE_UNUSED_MASK 0xffeUL
+	#define PTU_PDE_UNUSED_SFT 1
+	#define PTU_PDE_PAGE_MASK 0xfffff000UL
+	#define PTU_PDE_PAGE_SFT 12
+};
+
+#endif /* ___BNXT_RE_HSI_H__ */
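[Note: the PTU entries close out the header. Their layout is the classic page-table-entry scheme: low bits carry flags, bits 12 and up carry the 4K-aligned page address. A hypothetical helper, shown only to illustrate the encoding; the struct itself stores the value as two __le32 words:]

static u64 ptu_pte_encode(u64 page_addr, bool last)
{
	/* bits 0-2: VALID/LAST/NEXT_TO_LAST flags; bits 12+: page address */
	u64 pte = (page_addr & ~0xfffULL) | PTU_PTE_VALID;

	if (last)
		pte |= PTU_PTE_LAST;

	return pte;
}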
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index d4b9226088bd02..4e93ef7f84ee8a 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
 /*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
  */
 
 #ifndef _EFA_ADMIN_CMDS_H_
@@ -376,7 +376,9 @@ struct efa_admin_reg_mr_cmd {
	 * 0 : local_write_enable - Local write permissions:
	 *    must be set for RQ buffers and buffers posted for
	 *    RDMA Read requests
-	 * 1 : reserved1 - MBZ
+	 * 1 : remote_write_enable - Remote write
+	 *    permissions: must be set to enable RDMA write to
+	 *    the region
	 * 2 : remote_read_enable - Remote read permissions:
	 *    must be set to enable RDMA read from the region
	 * 7:3 : reserved2 - MBZ
@@ -618,7 +620,11 @@ struct efa_admin_feature_device_attr_desc {
	 *    TX queues
	 * 1 : rnr_retry - If set, RNR retry is supported on
	 *    modify QP command
-	 * 31:2 : reserved - MBZ
+	 * 2 : data_polling_128 - If set, 128 bytes data
+	 *    polling is supported
+	 * 3 : rdma_write - If set, RDMA Write is supported
+	 *    on TX queues
+	 * 31:4 : reserved - MBZ
	 */
	u32 device_caps;
 
@@ -672,7 +678,7 @@ struct efa_admin_feature_queue_attr_desc {
	/* The maximum size of LLQ in bytes */
	u32 max_llq_size;
 
-	/* Maximum number of SGEs for a single RDMA read WQE */
+	/* Maximum number of SGEs for a single RDMA read/write WQE */
	u16 max_wr_rdma_sges;
 
	/*
@@ -977,6 +983,7 @@ struct efa_admin_host_info {
 #define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK GENMASK(4, 0)
 #define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(7)
 #define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK BIT(0)
+#define EFA_ADMIN_REG_MR_CMD_REMOTE_WRITE_ENABLE_MASK BIT(1)
 #define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK BIT(2)
 
 /* create_cq_cmd */
@@ -991,6 +998,8 @@ struct efa_admin_host_info {
 /* feature_device_attr_desc */
 #define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
 #define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK BIT(1)
+#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_DATA_POLLING_128_MASK BIT(2)
+#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK BIT(3)
 
 /* create_eq_cmd */
 #define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
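[Note: device_caps is a bitfield reported by the device through the admin queue; the driver gates optional features on these bits (compare the EFA_DEV_CAP() checks in the efa_verbs.c hunk further below). A trivial illustration of a capability test, not code from the patch:]

static bool efa_dev_has_rdma_write(u32 device_caps)
{
	return !!(device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK);
}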
diff --git a/drivers/infiniband/hw/efa/efa_io_defs.h b/drivers/infiniband/hw/efa/efa_io_defs.h
index 17ba8984b11e9f..2d8eb96eaa81be 100644
--- a/drivers/infiniband/hw/efa/efa_io_defs.h
+++ b/drivers/infiniband/hw/efa/efa_io_defs.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
 /*
- * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
  */
 
 #ifndef _EFA_IO_H_
@@ -23,6 +23,8 @@ enum efa_io_send_op_type {
	EFA_IO_SEND = 0,
	/* RDMA read */
	EFA_IO_RDMA_READ = 1,
+	/* RDMA write */
+	EFA_IO_RDMA_WRITE = 2,
 };
 
 enum efa_io_comp_status {
@@ -62,8 +64,7 @@ struct efa_io_tx_meta_desc {
	/*
	 * control flags
-	 * 3:0 : op_type - operation type: send/rdma/fast mem
-	 *    ops/etc
+	 * 3:0 : op_type - enum efa_io_send_op_type
	 * 4 : has_imm - immediate_data field carries valid
	 *    data.
	 * 5 : inline_msg - inline mode - inline message data
@@ -219,21 +220,22 @@ struct efa_io_cdesc_common {
	 * 2:1 : q_type - enum efa_io_queue_type: send/recv
	 * 3 : has_imm - indicates that immediate data is
	 *    present - for RX completions only
-	 * 7:4 : reserved28 - MBZ
+	 * 6:4 : op_type - enum efa_io_send_op_type
+	 * 7 : reserved31 - MBZ
	 */
	u8 flags;
 
	/* local QP number */
	u16 qp_num;
-
-	/* Transferred length */
-	u16 length;
 };
 
 /* Tx completion descriptor */
 struct efa_io_tx_cdesc {
	/* Common completion info */
	struct efa_io_cdesc_common common;
+
+	/* MBZ */
+	u16 reserved16;
 };
 
 /* Rx Completion Descriptor */
@@ -241,6 +243,9 @@ struct efa_io_rx_cdesc {
	/* Common completion info */
	struct efa_io_cdesc_common common;
 
+	/* Transferred length bits[15:0] */
+	u16 length;
+
	/* Remote Address Handle FW index, 0xFFFF indicates invalid ah */
	u16 ah;
 
@@ -250,16 +255,26 @@ struct efa_io_rx_cdesc {
	u32 imm;
 };
 
+/* Rx Completion Descriptor RDMA write info */
+struct efa_io_rx_cdesc_rdma_write {
+	/* Transferred length bits[31:16] */
+	u16 length_hi;
+};
+
 /* Extended Rx Completion Descriptor */
 struct efa_io_rx_cdesc_ex {
	/* Base RX completion info */
-	struct efa_io_rx_cdesc rx_cdesc_base;
+	struct efa_io_rx_cdesc base;
 
-	/*
-	 * Valid only in case of unknown AH (0xFFFF) and CQ set_src_addr is
-	 * enabled.
-	 */
-	u8 src_addr[16];
+	union {
+		struct efa_io_rx_cdesc_rdma_write rdma_write;
+
+		/*
+		 * Valid only in case of unknown AH (0xFFFF) and CQ
+		 * set_src_addr is enabled.
+		 */
+		u8 src_addr[16];
+	} u;
 };
 
 /* tx_meta_desc */
@@ -285,5 +300,6 @@ struct efa_io_rx_cdesc_ex {
 #define EFA_IO_CDESC_COMMON_PHASE_MASK BIT(0)
 #define EFA_IO_CDESC_COMMON_Q_TYPE_MASK GENMASK(2, 1)
 #define EFA_IO_CDESC_COMMON_HAS_IMM_MASK BIT(3)
+#define EFA_IO_CDESC_COMMON_OP_TYPE_MASK GENMASK(6, 4)
 
 #endif /* _EFA_IO_H_ */
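[Note: with RDMA write completions the transferred length no longer fits the 16 bits that used to live in the common descriptor: bits 15:0 stay in the base RX descriptor and bits 31:16 move into the rdma_write member of the new union. A sketch of the recombination, not copied from the driver:]

#include <linux/bitfield.h>

static u32 efa_rx_cdesc_length(const struct efa_io_rx_cdesc_ex *cdesc)
{
	u32 len = cdesc->base.length;

	if (FIELD_GET(EFA_IO_CDESC_COMMON_OP_TYPE_MASK,
		      cdesc->base.common.flags) == EFA_IO_RDMA_WRITE)
		len |= (u32)cdesc->u.rdma_write.length_hi << 16;

	return len;
}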
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 31454643f8c54f..8eca6c14d0cfca 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
- * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
  */
 
 #include
@@ -250,6 +250,12 @@ int efa_query_device(struct ib_device *ibdev,
	if (EFA_DEV_CAP(dev, RNR_RETRY))
		resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
 
+	if (EFA_DEV_CAP(dev, DATA_POLLING_128))
+		resp.device_caps |= EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128;
+
+	if (EFA_DEV_CAP(dev, RDMA_WRITE))
+		resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE;
+
	if (dev->neqs)
		resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;
 
@@ -1569,7 +1575,8 @@ static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags,
	supp_access_flags =
		IB_ACCESS_LOCAL_WRITE |
-		(EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0);
+		(EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0) |
+		(EFA_DEV_CAP(dev, RDMA_WRITE) ? IB_ACCESS_REMOTE_WRITE : 0);
 
	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~supp_access_flags) {
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
index 3d8c11aa23a26f..e819e403249034 100644
--- a/drivers/infiniband/hw/erdma/erdma.h
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -32,7 +32,7 @@ struct erdma_eq {
	atomic64_t event_num;
	atomic64_t notify_num;
 
-	u64 __iomem *db_addr;
+	void __iomem *db;
	u64 *db_record;
 };
 
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.h b/drivers/infiniband/hw/erdma/erdma_cm.h
index 8a3f998fec9bda..a26d807701884b 100644
--- a/drivers/infiniband/hw/erdma/erdma_cm.h
+++ b/drivers/infiniband/hw/erdma/erdma_cm.h
@@ -33,11 +33,11 @@ struct mpa_rr_params {
  * MPA request/response Hdr bits & fields
  */
 enum {
-	MPA_RR_FLAG_MARKERS = __cpu_to_be16(0x8000),
-	MPA_RR_FLAG_CRC = __cpu_to_be16(0x4000),
-	MPA_RR_FLAG_REJECT = __cpu_to_be16(0x2000),
-	MPA_RR_RESERVED = __cpu_to_be16(0x1f00),
-	MPA_RR_MASK_REVISION = __cpu_to_be16(0x00ff)
+	MPA_RR_FLAG_MARKERS = cpu_to_be16(0x8000),
+	MPA_RR_FLAG_CRC = cpu_to_be16(0x4000),
+	MPA_RR_FLAG_REJECT = cpu_to_be16(0x2000),
+	MPA_RR_RESERVED = cpu_to_be16(0x1f00),
+	MPA_RR_MASK_REVISION = cpu_to_be16(0x00ff)
 };
 
 /*
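[Note: the MPA_RR_FLAG_* values are stored pre-swapped to big-endian, so a header's flags word can be tested against them directly on either endianness, without a per-packet byte swap. A hedged sketch; "bits_rev" is a hypothetical stand-in for the bits/revision field of the MPA request/response header:]

static bool mpa_crc_requested(__be16 bits_rev)
{
	/* both operands are big-endian bit patterns, so no swap is needed */
	return !!(bits_rev & MPA_RR_FLAG_CRC);
}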
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
index 6ebfa6989b11eb..a151a7bdd5049d 100644
--- a/drivers/infiniband/hw/erdma/erdma_cmdq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -166,8 +166,7 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
 
-	eq->db_addr =
-		(u64 __iomem *)(dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG);
+	eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG;
	eq->db_record = (u64 *)(eq->qbuf + buf_size);
 
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
@@ -183,9 +182,8 @@ int erdma_cmdq_init(struct erdma_dev *dev)
 int erdma_cmdq_init(struct erdma_dev *dev)
 {
-	int err, i;
	struct erdma_cmdq *cmdq = &dev->cmdq;
-	u32 sts, ctrl;
+	int err;
 
	cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
	cmdq->use_event = false;
@@ -208,34 +206,10 @@ int erdma_cmdq_init(struct erdma_dev *dev)
	if (err)
		goto err_destroy_cq;
 
-	ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_INIT_MASK, 1);
-	erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
-
-	for (i = 0; i < ERDMA_WAIT_DEV_DONE_CNT; i++) {
-		sts = erdma_reg_read32_filed(dev, ERDMA_REGS_DEV_ST_REG,
-					     ERDMA_REG_DEV_ST_INIT_DONE_MASK);
-		if (sts)
-			break;
-
-		msleep(ERDMA_REG_ACCESS_WAIT_MS);
-	}
-
-	if (i == ERDMA_WAIT_DEV_DONE_CNT) {
-		dev_err(&dev->pdev->dev, "wait init done failed.\n");
-		err = -ETIMEDOUT;
-		goto err_destroy_eq;
-	}
-
	set_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
 
	return 0;
 
-err_destroy_eq:
-	dma_free_coherent(&dev->pdev->dev,
-			  (cmdq->eq.depth << EQE_SHIFT) +
-				  ERDMA_EXTRA_BUFFER_SIZE,
-			  cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
-
 err_destroy_cq:
	dma_free_coherent(&dev->pdev->dev,
			  (cmdq->cq.depth << CQE_SHIFT) +
@@ -283,7 +257,7 @@ static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
	__be32 *cqe = get_queue_entry(cmdq->cq.qbuf, cmdq->cq.ci,
				      cmdq->cq.depth, CQE_SHIFT);
	u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
-			      __be32_to_cpu(READ_ONCE(*cqe)));
+			      be32_to_cpu(READ_ONCE(*cqe)));
 
	return owner ^ !!(cmdq->cq.ci & cmdq->cq.depth) ? cqe : NULL;
 }
@@ -319,7 +293,6 @@ static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
	__be32 *cqe;
	u16 ctx_id;
	u64 *sqe;
-	int i;
 
	cqe = get_next_valid_cmdq_cqe(cmdq);
	if (!cqe)
@@ -328,8 +301,8 @@ static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
	cmdq->cq.ci++;
	dma_rmb();
 
-	hdr0 = __be32_to_cpu(*cqe);
-	sqe_idx = __be32_to_cpu(*(cqe + 1));
+	hdr0 = be32_to_cpu(*cqe);
+	sqe_idx = be32_to_cpu(*(cqe + 1));
	sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth,
			      SQEBB_SHIFT);
@@ -341,9 +314,8 @@ static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
	comp_wait->cmd_status = ERDMA_CMD_STATUS_FINISHED;
	comp_wait->comp_status = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, hdr0);
	cmdq->sq.ci += cmdq->sq.wqebb_cnt;
-
-	for (i = 0; i < 4; i++)
-		comp_wait->comp_data[i] = __be32_to_cpu(*(cqe + 2 + i));
+	/* Copy 16B comp data after cqe hdr to outer */
+	be32_to_cpu_array(comp_wait->comp_data, cqe + 2, 4);
 
	if (cmdq->use_event)
		complete(&comp_wait->wait_event);
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
index 7bc354273d4ec0..c1cb5568eab2d5 100644
--- a/drivers/infiniband/hw/erdma/erdma_cq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -11,7 +11,7 @@ static void *get_next_valid_cqe(struct erdma_cq *cq)
	__be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci,
				      cq->depth, CQE_SHIFT);
	u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
-			      __be32_to_cpu(READ_ONCE(*cqe)));
+			      be32_to_cpu(READ_ONCE(*cqe)));
 
	return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL;
 }
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
index ed54130d924b30..ea47cb21fdb8ca 100644
--- a/drivers/infiniband/hw/erdma/erdma_eq.c
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -14,7 +14,7 @@ void notify_eq(struct erdma_eq *eq)
		  FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);
 
	*eq->db_record = db_data;
-	writeq(db_data, eq->db_addr);
+	writeq(db_data, eq->db);
 
	atomic64_inc(&eq->notify_num);
 }
@@ -98,7 +98,7 @@ int erdma_aeq_init(struct erdma_dev *dev)
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);
 
-	eq->db_addr = (u64 __iomem *)(dev->func_bar + ERDMA_REGS_AEQ_DB_REG);
+	eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
	eq->db_record = (u64 *)(eq->qbuf + buf_size);
 
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
@@ -243,9 +243,8 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
	atomic64_set(&eq->notify_num, 0);
 
	eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
-	eq->db_addr =
-		(u64 __iomem *)(dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
-				(ceqn + 1) * ERDMA_DB_SIZE);
+	eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
+		 (ceqn + 1) * ERDMA_DB_SIZE;
	eq->db_record = (u64 *)(eq->qbuf + buf_size);
	eq->ci = 0;
	dev->ceqs[ceqn].dev = dev;
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index 37ad1bb1917c49..76ce2856be28a6 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -112,6 +112,10 @@
 
 #define ERDMA_PAGE_SIZE_SUPPORT 0x7FFFF000
 
+/* Hardware page size definition */
+#define ERDMA_HW_PAGE_SHIFT 12
+#define ERDMA_HW_PAGE_SIZE 4096
+
 /* WQE related. */
 #define EQE_SIZE 16
 #define EQE_SHIFT 4
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 4a29a53a6652ea..7c74abeee864cc 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -211,13 +211,36 @@ static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
	return 0;
 }
 
-static void erdma_device_uninit(struct erdma_dev *dev)
+static void erdma_hw_reset(struct erdma_dev *dev)
 {
	u32 ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_RESET_MASK, 1);
 
	erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
 }
 
+static int erdma_wait_hw_init_done(struct erdma_dev *dev)
+{
+	int i;
+
+	erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG,
+			  FIELD_PREP(ERDMA_REG_DEV_CTRL_INIT_MASK, 1));
+
+	for (i = 0; i < ERDMA_WAIT_DEV_DONE_CNT; i++) {
+		if (erdma_reg_read32_filed(dev, ERDMA_REGS_DEV_ST_REG,
+					   ERDMA_REG_DEV_ST_INIT_DONE_MASK))
+			break;
+
+		msleep(ERDMA_REG_ACCESS_WAIT_MS);
+	}
+
+	if (i == ERDMA_WAIT_DEV_DONE_CNT) {
+		dev_err(&dev->pdev->dev, "wait init done failed.\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
 static const struct pci_device_id erdma_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ALIBABA, 0x107f) },
	{}
@@ -293,16 +316,22 @@ static int erdma_probe_dev(struct pci_dev *pdev)
	if (err)
		goto err_uninit_aeq;
 
-	err = erdma_ceqs_init(dev);
+	err = erdma_wait_hw_init_done(dev);
	if (err)
		goto err_uninit_cmdq;
 
+	err = erdma_ceqs_init(dev);
+	if (err)
+		goto err_reset_hw;
+
	erdma_finish_cmdq_init(dev);
 
	return 0;
 
+err_reset_hw:
+	erdma_hw_reset(dev);
+
 err_uninit_cmdq:
-	erdma_device_uninit(dev);
	erdma_cmdq_destroy(dev);
 
 err_uninit_aeq:
@@ -334,9 +363,7 @@ static void erdma_remove_dev(struct pci_dev *pdev)
	struct erdma_dev *dev = pci_get_drvdata(pdev);
 
	erdma_ceqs_uninit(dev);
-
-	erdma_device_uninit(dev);
-
+	erdma_hw_reset(dev);
	erdma_cmdq_destroy(dev);
	erdma_aeq_destroy(dev);
	erdma_comm_irq_uninit(dev);
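[Note: the probe flow above now kicks the device (DEV_CTRL init bit) and polls a status register until the init-done bit is set or a retry budget runs out, a common pattern for firmware handshakes. A generic sketch of the idiom with made-up register arguments, not driver code:]

static int wait_hw_ready(void __iomem *status_reg, u32 done_bit,
			 int max_tries, unsigned int sleep_ms)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (readl(status_reg) & done_bit)
			return 0;	/* firmware reports init complete */
		msleep(sleep_ms);
	}

	return -ETIMEDOUT;
}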
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 9c30d78730aa14..83e1b0d5597712 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -38,7 +38,7 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
		FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
 
	if (rdma_is_kernel_res(&qp->ibqp.res)) {
-		u32 pgsz_range = ilog2(SZ_1M) - PAGE_SHIFT;
+		u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;
 
		req.sq_cqn_mtt_cfg =
			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
@@ -66,13 +66,13 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
		user_qp = &qp->user_qp;
		req.sq_cqn_mtt_cfg = FIELD_PREP(
			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
-			ilog2(user_qp->sq_mtt.page_size) - PAGE_SHIFT);
+			ilog2(user_qp->sq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
		req.sq_cqn_mtt_cfg |=
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
 
		req.rq_cqn_mtt_cfg = FIELD_PREP(
			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
-			ilog2(user_qp->rq_mtt.page_size) - PAGE_SHIFT);
+			ilog2(user_qp->rq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
		req.rq_cqn_mtt_cfg |=
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
 
@@ -162,7 +162,7 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
	if (rdma_is_kernel_res(&cq->ibcq.res)) {
		page_size = SZ_32M;
		req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
-				       ilog2(page_size) - PAGE_SHIFT);
+				       ilog2(page_size) - ERDMA_HW_PAGE_SHIFT);
		req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
		req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);
 
@@ -175,8 +175,9 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
			cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
	} else {
		mtt = &cq->user_cq.qbuf_mtt;
-		req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
-				       ilog2(mtt->page_size) - PAGE_SHIFT);
+		req.cfg0 |=
+			FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+				   ilog2(mtt->page_size) - ERDMA_HW_PAGE_SHIFT);
		if (mtt->mtt_nents == 1) {
			req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf);
			req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf);
@@ -636,7 +637,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
	u32 rq_offset;
	int ret;
 
-	if (len < (PAGE_ALIGN(qp->attrs.sq_size * SQEBB_SIZE) +
+	if (len < (ALIGN(qp->attrs.sq_size * SQEBB_SIZE, ERDMA_HW_PAGE_SIZE) +
		   qp->attrs.rq_size * RQE_SIZE))
		return -EINVAL;
 
@@ -646,7 +647,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
	if (ret)
		return ret;
 
-	rq_offset = PAGE_ALIGN(qp->attrs.sq_size << SQEBB_SHIFT);
+	rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
	qp->user_qp.rq_offset = rq_offset;
 
	ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset,
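[Note: the switch from PAGE_SHIFT to ERDMA_HW_PAGE_SHIFT matters on kernels built with non-4K pages: the device counts pages in fixed 4K units, so anchoring the field on the kernel's PAGE_SIZE mis-programs it when PAGE_SIZE != 4K. An illustrative helper, with the arm64 64K-page case worked out in the comment:]

#include <linux/log2.h>

/* On a 64K-page arm64 kernel, ilog2(SZ_1M) - PAGE_SHIFT = 20 - 16 = 4,
 * but the device expects the 4K-relative value 20 - 12 = 8; always
 * compute against the fixed hardware page shift.
 */
static u32 erdma_hw_pgsz_field(unsigned long page_size)
{
	return ilog2(page_size) - ERDMA_HW_PAGE_SHIFT;
}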
+175,9 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq) cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT); } else { mtt = &cq->user_cq.qbuf_mtt; - req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK, - ilog2(mtt->page_size) - PAGE_SHIFT); + req.cfg0 |= + FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK, + ilog2(mtt->page_size) - ERDMA_HW_PAGE_SHIFT); if (mtt->mtt_nents == 1) { req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf); req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf); @@ -636,7 +637,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx, u32 rq_offset; int ret; - if (len < (PAGE_ALIGN(qp->attrs.sq_size * SQEBB_SIZE) + + if (len < (ALIGN(qp->attrs.sq_size * SQEBB_SIZE, ERDMA_HW_PAGE_SIZE) + qp->attrs.rq_size * RQE_SIZE)) return -EINVAL; @@ -646,7 +647,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx, if (ret) return ret; - rq_offset = PAGE_ALIGN(qp->attrs.sq_size << SQEBB_SHIFT); + rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE); qp->user_qp.rq_offset = rq_offset; ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset, diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 90b672feed83dd..9dbb89e9f4afc7 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -12135,7 +12135,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, IS_RCVURGENT_START + rcd->ctxt, false); - hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl); + hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx", ctxt, rcvctrl); write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl); /* work around sticky RcvCtxtStatus.BlockedRHQFull */ @@ -12205,10 +12205,10 @@ u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp) hfi1_cdbg(CNTR, "reading %s", entry->name); if (entry->flags & CNTR_DISABLED) { /* Nothing */ - hfi1_cdbg(CNTR, "\tDisabled\n"); + hfi1_cdbg(CNTR, "\tDisabled"); } else { if (entry->flags & CNTR_VL) { - hfi1_cdbg(CNTR, "\tPer VL\n"); + hfi1_cdbg(CNTR, "\tPer VL"); for (j = 0; j < C_VL_COUNT; j++) { val = entry->rw_cntr(entry, dd, j, @@ -12216,21 +12216,21 @@ u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp) 0); hfi1_cdbg( CNTR, - "\t\tRead 0x%llx for %d\n", + "\t\tRead 0x%llx for %d", val, j); dd->cntrs[entry->offset + j] = val; } } else if (entry->flags & CNTR_SDMA) { hfi1_cdbg(CNTR, - "\t Per SDMA Engine\n"); + "\t Per SDMA Engine"); for (j = 0; j < chip_sdma_engines(dd); j++) { val = entry->rw_cntr(entry, dd, j, CNTR_MODE_R, 0); hfi1_cdbg(CNTR, - "\t\tRead 0x%llx for %d\n", + "\t\tRead 0x%llx for %d", val, j); dd->cntrs[entry->offset + j] = val; @@ -12271,7 +12271,7 @@ u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp) hfi1_cdbg(CNTR, "reading %s", entry->name); if (entry->flags & CNTR_DISABLED) { /* Nothing */ - hfi1_cdbg(CNTR, "\tDisabled\n"); + hfi1_cdbg(CNTR, "\tDisabled"); continue; } @@ -12513,7 +12513,7 @@ static void do_update_synth_timer(struct work_struct *work) hfi1_cdbg( CNTR, - "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n", + "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx", dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx); if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) { @@ -12527,7 +12527,7 @@ static void do_update_synth_timer(struct work_struct *work) } else { total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx); hfi1_cdbg(CNTR, - 
"[%d] total flits 0x%llx limit 0x%llx\n", dd->unit, + "[%d] total flits 0x%llx limit 0x%llx", dd->unit, total_flits, (u64)CNTR_32BIT_MAX); if (total_flits >= CNTR_32BIT_MAX) { hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating", diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index bcc6bc0540f033..f4492fa407e006 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -1597,7 +1597,7 @@ static int hfi1_setup_bypass_packet(struct hfi1_packet *packet) return 0; drop: - hfi1_cdbg(PKT, "%s: packet dropped\n", __func__); + hfi1_cdbg(PKT, "%s: packet dropped", __func__); ibp->rvp.n_pkt_drops++; return -EINVAL; } diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index f3d6ce45c39747..a5ab22cedd41a6 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -975,7 +975,7 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd, ret = -ENOMEM; goto ctxdata_free; } - hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index, + hfi1_cdbg(PROC, "allocated send context %u(%u)", uctxt->sc->sw_index, uctxt->sc->hw_context); ret = sc_enable(uctxt->sc); if (ret) diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 62b6c502003950..6de37c5d7d2732 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -342,7 +342,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa, INIT_LIST_HEAD(&rcd->flow_queue.queue_head); INIT_LIST_HEAD(&rcd->rarr_queue.queue_head); - hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt); + hfi1_cdbg(PROC, "setting up context %u", rcd->ctxt); /* * Calculate the context's RcvArray entry starting point. @@ -400,7 +400,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa, rcd->egrbufs.count = MAX_EAGER_ENTRIES; } hfi1_cdbg(PROC, - "ctxt%u: max Eager buffer RcvArray entries: %u\n", + "ctxt%u: max Eager buffer RcvArray entries: %u", rcd->ctxt, rcd->egrbufs.count); /* @@ -432,7 +432,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa, if (rcd->egrbufs.size < hfi1_max_mtu) { rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu); hfi1_cdbg(PROC, - "ctxt%u: eager bufs size too small. Adjusting to %u\n", + "ctxt%u: eager bufs size too small. 
Adjusting to %u", rcd->ctxt, rcd->egrbufs.size); } rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE; @@ -1920,7 +1920,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) rcd->egrbufs.size = alloced_bytes; hfi1_cdbg(PROC, - "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n", + "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB", rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024); @@ -1943,13 +1943,13 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2; rcd->expected_base = rcd->eager_base + egrtop; - hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n", + hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u", rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count, rcd->eager_base, rcd->expected_base); if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) { hfi1_cdbg(PROC, - "ctxt%u: current Eager buffer size is invalid %u\n", + "ctxt%u: current Eager buffer size is invalid %u", rcd->ctxt, rcd->egrbufs.rcvtid_size); ret = -EINVAL; goto bail_rcvegrbuf_phys; diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c index 5d9a7b09ca37ea..8973a081d641e6 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_tx.c +++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c @@ -215,6 +215,7 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx, const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ret = sdma_txadd_page(dd, + NULL, txreq, skb_frag_page(frag), frag->bv_offset, @@ -737,10 +738,13 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) txq->tx_ring.shift = ilog2(tx_item_size); txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq); tx_ring = &txq->tx_ring; - for (j = 0; j < tx_ring_size; j++) + for (j = 0; j < tx_ring_size; j++) { hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr = kzalloc_node(sizeof(*tx->sdma_hdr), GFP_KERNEL, priv->dd->node); + if (!hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr) + goto free_txqs; + } netif_napi_add_tx(dev, &txq->napi, hfi1_ipoib_poll_tx_ring); } diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index 7333646021bb80..1cea8b0c78e0f0 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c @@ -46,12 +46,14 @@ int hfi1_mmu_rb_register(void *ops_arg, struct mmu_rb_handler **handler) { struct mmu_rb_handler *h; + void *free_ptr; int ret; - h = kzalloc(sizeof(*h), GFP_KERNEL); - if (!h) + free_ptr = kzalloc(sizeof(*h) + cache_line_size() - 1, GFP_KERNEL); + if (!free_ptr) return -ENOMEM; + h = PTR_ALIGN(free_ptr, cache_line_size()); h->root = RB_ROOT_CACHED; h->ops = ops; h->ops_arg = ops_arg; @@ -62,10 +64,11 @@ int hfi1_mmu_rb_register(void *ops_arg, INIT_LIST_HEAD(&h->del_list); INIT_LIST_HEAD(&h->lru_list); h->wq = wq; + h->free_ptr = free_ptr; ret = mmu_notifier_register(&h->mn, current->mm); if (ret) { - kfree(h); + kfree(free_ptr); return ret; } @@ -108,7 +111,7 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) /* Now the mm may be freed. 
*/ mmdrop(handler->mn.mm); - kfree(handler); + kfree(handler->free_ptr); } int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler, @@ -126,11 +129,11 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler, spin_lock_irqsave(&handler->lock, flags); node = __mmu_rb_search(handler, mnode->addr, mnode->len); if (node) { - ret = -EINVAL; + ret = -EEXIST; goto unlock; } __mmu_int_rb_insert(mnode, &handler->root); - list_add(&mnode->list, &handler->lru_list); + list_add_tail(&mnode->list, &handler->lru_list); ret = handler->ops->insert(handler->ops_arg, mnode); if (ret) { @@ -143,6 +146,19 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler, return ret; } +/* Caller must hold handler lock */ +struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler, + unsigned long addr, unsigned long len) +{ + struct mmu_rb_node *node; + + trace_hfi1_mmu_rb_search(addr, len); + node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1); + if (node) + list_move_tail(&node->list, &handler->lru_list); + return node; +} + /* Caller must hold handler lock */ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, unsigned long addr, @@ -167,32 +183,6 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, return node; } -bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler, - unsigned long addr, unsigned long len, - struct mmu_rb_node **rb_node) -{ - struct mmu_rb_node *node; - unsigned long flags; - bool ret = false; - - if (current->mm != handler->mn.mm) - return ret; - - spin_lock_irqsave(&handler->lock, flags); - node = __mmu_rb_search(handler, addr, len); - if (node) { - if (node->addr == addr && node->len == len) - goto unlock; - __mmu_int_rb_remove(node, &handler->root); - list_del(&node->list); /* remove from LRU list */ - ret = true; - } -unlock: - spin_unlock_irqrestore(&handler->lock, flags); - *rb_node = node; - return ret; -} - void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg) { struct mmu_rb_node *rbnode, *ptr; @@ -206,8 +196,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg) INIT_LIST_HEAD(&del_list); spin_lock_irqsave(&handler->lock, flags); - list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list, - list) { + list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) { if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg, &stop)) { __mmu_int_rb_remove(rbnode, &handler->root); @@ -219,36 +208,11 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg) } spin_unlock_irqrestore(&handler->lock, flags); - while (!list_empty(&del_list)) { - rbnode = list_first_entry(&del_list, struct mmu_rb_node, list); - list_del(&rbnode->list); + list_for_each_entry_safe(rbnode, ptr, &del_list, list) { handler->ops->remove(handler->ops_arg, rbnode); } } -/* - * It is up to the caller to ensure that this function does not race with the - * mmu invalidate notifier which may be calling the users remove callback on - * 'node'. - */ -void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler, - struct mmu_rb_node *node) -{ - unsigned long flags; - - if (current->mm != handler->mn.mm) - return; - - /* Validity of handler and node pointers has been checked by caller. 
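The mmu_rb hunks above change the LRU discipline: hfi1_mmu_rb_insert() and hfi1_mmu_rb_get_first() now keep the most-recently-used node at the tail of lru_list (list_add_tail()/list_move_tail()), so hfi1_mmu_rb_evict() can walk the list from the head in plain order instead of in reverse. A minimal sketch of that discipline with an intrusive doubly-linked list; names and the sentinel layout are illustrative:

#include <stdio.h>

/* Minimal intrusive list standing in for the kernel's list_head */
struct node {
	int id;
	struct node *prev, *next;
};

static struct node lru = { .prev = &lru, .next = &lru };	/* sentinel */

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail(struct node *n)
{
	n->prev = lru.prev;
	n->next = &lru;
	lru.prev->next = n;
	lru.prev = n;
}

/* On a lookup hit, move the node to the tail: it is now most recently used */
static void touch(struct node *n)
{
	list_del(n);
	list_add_tail(n);
}

/* Eviction starts from the head, i.e. the least recently used node */
static struct node *evict_candidate(void)
{
	return lru.next == &lru ? NULL : lru.next;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };

	list_add_tail(&a);
	list_add_tail(&b);
	touch(&a);	/* a becomes MRU; b is now first in line for eviction */
	printf("evict id=%d\n", evict_candidate()->id);	/* prints 2 */
	return 0;
}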
*/ - trace_hfi1_mmu_rb_remove(node->addr, node->len); - spin_lock_irqsave(&handler->lock, flags); - __mmu_int_rb_remove(node, &handler->root); - list_del(&node->list); /* remove from LRU list */ - spin_unlock_irqrestore(&handler->lock, flags); - - handler->ops->remove(handler->ops_arg, node); -} - static int mmu_notifier_range_start(struct mmu_notifier *mn, const struct mmu_notifier_range *range) { diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h index 7417be2b9dc8a5..c4da064188c9d3 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.h +++ b/drivers/infiniband/hw/hfi1/mmu_rb.h @@ -33,15 +33,25 @@ struct mmu_rb_ops { }; struct mmu_rb_handler { + /* + * struct mmu_notifier is 56 bytes, and spinlock_t is 4 bytes, so + * they fit together in one cache line. mn is relatively rarely + * accessed, so co-locating the spinlock with it achieves much of + * the cacheline contention reduction of giving the spinlock its own + * cacheline without the overhead of doing so. + */ struct mmu_notifier mn; - struct rb_root_cached root; - void *ops_arg; spinlock_t lock; /* protect the RB tree */ + + /* Begin on a new cacheline boundary here */ + struct rb_root_cached root ____cacheline_aligned_in_smp; + void *ops_arg; struct mmu_rb_ops *ops; struct list_head lru_list; struct work_struct del_work; struct list_head del_list; struct workqueue_struct *wq; + void *free_ptr; }; int hfi1_mmu_rb_register(void *ops_arg, @@ -52,10 +62,8 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler); int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler, struct mmu_rb_node *mnode); void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg); -void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler, - struct mmu_rb_node *mnode); -bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler, - unsigned long addr, unsigned long len, - struct mmu_rb_node **rb_node); +struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler, + unsigned long addr, + unsigned long len); #endif /* _HFI1_MMU_RB_H */ diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index a0802332c8cb32..08732e1ac96627 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include "hfi.h" @@ -65,7 +64,6 @@ int hfi1_pcie_init(struct hfi1_devdata *dd) } pci_set_master(pdev); - (void)pci_enable_pcie_error_reporting(pdev); return 0; bail: diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 51ae58c02b15c7..62e7dc9bea7bde 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -820,7 +820,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type, } hfi1_cdbg(PIO, - "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n", + "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u", sw_index, hw_context, sc_type_name(type), diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 8ed20392e9f0d2..bb2552dd29c1e2 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -1593,22 +1593,7 @@ static inline void sdma_unmap_desc( struct hfi1_devdata *dd, struct sdma_desc *descp) { - switch (sdma_mapping_type(descp)) { - case SDMA_MAP_SINGLE: - dma_unmap_single( - &dd->pcidev->dev, - sdma_mapping_addr(descp), - sdma_mapping_len(descp), - DMA_TO_DEVICE); - break; - case SDMA_MAP_PAGE: - dma_unmap_page( - 
&dd->pcidev->dev, - sdma_mapping_addr(descp), - sdma_mapping_len(descp), - DMA_TO_DEVICE); - break; - } + system_descriptor_complete(dd, descp); } /* @@ -3128,7 +3113,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx, /* Add descriptor for coalesce buffer */ tx->desc_limit = MAX_DESC; - return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, + return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx, addr, tx->tlen); } @@ -3167,10 +3152,12 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) return rval; } } + /* finish the one just added */ make_tx_sdma_desc( tx, SDMA_MAP_NONE, + NULL, dd->sdma_pad_phys, sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1))); tx->num_desc++; diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h index b023fc461bd51e..95aaec14c6c288 100644 --- a/drivers/infiniband/hw/hfi1/sdma.h +++ b/drivers/infiniband/hw/hfi1/sdma.h @@ -594,6 +594,7 @@ static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d) static inline void make_tx_sdma_desc( struct sdma_txreq *tx, int type, + void *pinning_ctx, dma_addr_t addr, size_t len) { @@ -612,6 +613,7 @@ static inline void make_tx_sdma_desc( << SDMA_DESC0_PHY_ADDR_SHIFT) | (((u64)len & SDMA_DESC0_BYTE_COUNT_MASK) << SDMA_DESC0_BYTE_COUNT_SHIFT); + desc->pinning_ctx = pinning_ctx; } /* helper to extend txreq */ @@ -643,6 +645,7 @@ static inline void _sdma_close_tx(struct hfi1_devdata *dd, static inline int _sdma_txadd_daddr( struct hfi1_devdata *dd, int type, + void *pinning_ctx, struct sdma_txreq *tx, dma_addr_t addr, u16 len) @@ -652,6 +655,7 @@ static inline int _sdma_txadd_daddr( make_tx_sdma_desc( tx, type, + pinning_ctx, addr, len); WARN_ON(len > tx->tlen); tx->num_desc++; @@ -672,6 +676,7 @@ static inline int _sdma_txadd_daddr( /** * sdma_txadd_page() - add a page to the sdma_txreq * @dd: the device to use for mapping + * @pinning_ctx: context to be released at descriptor retirement * @tx: tx request to which the page is added * @page: page to map * @offset: offset within the page @@ -687,6 +692,7 @@ static inline int _sdma_txadd_daddr( */ static inline int sdma_txadd_page( struct hfi1_devdata *dd, + void *pinning_ctx, struct sdma_txreq *tx, struct page *page, unsigned long offset, @@ -714,8 +720,7 @@ static inline int sdma_txadd_page( return -ENOSPC; } - return _sdma_txadd_daddr( - dd, SDMA_MAP_PAGE, tx, addr, len); + return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, pinning_ctx, tx, addr, len); } /** @@ -749,7 +754,8 @@ static inline int sdma_txadd_daddr( return rval; } - return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len); + return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, NULL, tx, + addr, len); } /** @@ -795,8 +801,7 @@ static inline int sdma_txadd_kvaddr( return -ENOSPC; } - return _sdma_txadd_daddr( - dd, SDMA_MAP_SINGLE, tx, addr, len); + return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx, addr, len); } struct iowait_work; @@ -1030,4 +1035,5 @@ extern uint mod_num_sdma; void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid); +void system_descriptor_complete(struct hfi1_devdata *dd, struct sdma_desc *descp); #endif diff --git a/drivers/infiniband/hw/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h index e262fb5c5ec611..fad946cb5e0d86 100644 --- a/drivers/infiniband/hw/hfi1/sdma_txreq.h +++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h @@ -19,6 +19,7 @@ struct sdma_desc { /* private: don't use directly */ u64 qw[2]; + void *pinning_ctx; }; /** diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h 
b/drivers/infiniband/hw/hfi1/trace_dbg.h index 582b6f68df3dd6..489395bfb5b3bd 100644 --- a/drivers/infiniband/hw/hfi1/trace_dbg.h +++ b/drivers/infiniband/hw/hfi1/trace_dbg.h @@ -22,6 +22,11 @@ #define MAX_MSG_LEN 512 +#pragma GCC diagnostic push +#ifndef __clang__ +#pragma GCC diagnostic ignored "-Wsuggest-attribute=format" +#endif + DECLARE_EVENT_CLASS(hfi1_trace_template, TP_PROTO(const char *function, struct va_format *vaf), TP_ARGS(function, vaf), @@ -36,6 +41,8 @@ DECLARE_EVENT_CLASS(hfi1_trace_template, __get_str(msg)) ); +#pragma GCC diagnostic pop + /* * It may be nice to macroize the __hfi1_trace but the va_* stuff requires an * actual function to work and can not be in a macro. diff --git a/drivers/infiniband/hw/hfi1/trace_mmu.h b/drivers/infiniband/hw/hfi1/trace_mmu.h index 187e9244fe5ed9..57900ebb7702e5 100644 --- a/drivers/infiniband/hw/hfi1/trace_mmu.h +++ b/drivers/infiniband/hw/hfi1/trace_mmu.h @@ -37,10 +37,6 @@ DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_search, TP_PROTO(unsigned long addr, unsigned long len), TP_ARGS(addr, len)); -DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_remove, - TP_PROTO(unsigned long addr, unsigned long len), - TP_ARGS(addr, len)); - DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_mem_invalidate, TP_PROTO(unsigned long addr, unsigned long len), TP_ARGS(addr, len)); diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index a71c5a36cebab8..ae58b48afe0749 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -24,7 +24,6 @@ #include "hfi.h" #include "sdma.h" -#include "mmu_rb.h" #include "user_sdma.h" #include "verbs.h" /* for the headers */ #include "common.h" /* for struct hfi1_tid_info */ @@ -39,11 +38,7 @@ static unsigned initial_pkt_count = 8; static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts); static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status); static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq); -static void user_sdma_free_request(struct user_sdma_request *req, bool unpin); -static int pin_vector_pages(struct user_sdma_request *req, - struct user_sdma_iovec *iovec); -static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, - unsigned start, unsigned npages); +static void user_sdma_free_request(struct user_sdma_request *req); static int check_header_template(struct user_sdma_request *req, struct hfi1_pkt_header *hdr, u32 lrhlen, u32 datalen); @@ -81,6 +76,11 @@ static struct mmu_rb_ops sdma_rb_ops = { .invalidate = sdma_rb_invalidate }; +static int add_system_pages_to_sdma_packet(struct user_sdma_request *req, + struct user_sdma_txreq *tx, + struct user_sdma_iovec *iovec, + u32 *pkt_remaining); + static int defer_packet_queue( struct sdma_engine *sde, struct iowait_work *wait, @@ -410,6 +410,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, ret = -EINVAL; goto free_req; } + /* Copy the header from the user buffer */ ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info), sizeof(req->hdr)); @@ -484,9 +485,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(req->iovs[i].iov)); - ret = pin_vector_pages(req, &req->iovs[i]); - if (ret) { - req->data_iovs = i; + if (req->iovs[i].iov.iov_len == 0) { + ret = -EINVAL; goto free_req; } req->data_len += req->iovs[i].iov.iov_len; @@ -584,7 +584,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, if (req->seqsubmitted) wait_event(pq->busy.wait_dma, 
(req->seqcomp == req->seqsubmitted - 1)); - user_sdma_free_request(req, true); + user_sdma_free_request(req); pq_update(pq); set_comp_state(pq, cq, info.comp_idx, ERROR, ret); } @@ -696,48 +696,6 @@ static int user_sdma_txadd_ahg(struct user_sdma_request *req, return ret; } -static int user_sdma_txadd(struct user_sdma_request *req, - struct user_sdma_txreq *tx, - struct user_sdma_iovec *iovec, u32 datalen, - u32 *queued_ptr, u32 *data_sent_ptr, - u64 *iov_offset_ptr) -{ - int ret; - unsigned int pageidx, len; - unsigned long base, offset; - u64 iov_offset = *iov_offset_ptr; - u32 queued = *queued_ptr, data_sent = *data_sent_ptr; - struct hfi1_user_sdma_pkt_q *pq = req->pq; - - base = (unsigned long)iovec->iov.iov_base; - offset = offset_in_page(base + iovec->offset + iov_offset); - pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >> - PAGE_SHIFT); - len = offset + req->info.fragsize > PAGE_SIZE ? - PAGE_SIZE - offset : req->info.fragsize; - len = min((datalen - queued), len); - ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx], - offset, len); - if (ret) { - SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret); - return ret; - } - iov_offset += len; - queued += len; - data_sent += len; - if (unlikely(queued < datalen && pageidx == iovec->npages && - req->iov_idx < req->data_iovs - 1)) { - iovec->offset += iov_offset; - iovec = &req->iovs[++req->iov_idx]; - iov_offset = 0; - } - - *queued_ptr = queued; - *data_sent_ptr = data_sent; - *iov_offset_ptr = iov_offset; - return ret; -} - static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts) { int ret = 0; @@ -769,8 +727,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts) maxpkts = req->info.npkts - req->seqnum; while (npkts < maxpkts) { - u32 datalen = 0, queued = 0, data_sent = 0; - u64 iov_offset = 0; + u32 datalen = 0; /* * Check whether any of the completions have come back @@ -863,27 +820,17 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts) goto free_txreq; } - /* - * If the request contains any data vectors, add up to - * fragsize bytes to the descriptor. - */ - while (queued < datalen && - (req->sent + data_sent) < req->data_len) { - ret = user_sdma_txadd(req, tx, iovec, datalen, - &queued, &data_sent, &iov_offset); - if (ret) - goto free_txreq; - } - /* - * The txreq was submitted successfully so we can update - * the counters. 
- */ req->koffset += datalen; if (req_opcode(req->info.ctrl) == EXPECTED) req->tidoffset += datalen; - req->sent += data_sent; - if (req->data_len) - iovec->offset += iov_offset; + req->sent += datalen; + while (datalen) { + ret = add_system_pages_to_sdma_packet(req, tx, iovec, + &datalen); + if (ret) + goto free_txreq; + iovec = &req->iovs[req->iov_idx]; + } list_add_tail(&tx->txreq.list, &req->txps); /* * It is important to increment this here as it is used to @@ -920,133 +867,14 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts) static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages) { struct evict_data evict_data; + struct mmu_rb_handler *handler = pq->handler; evict_data.cleared = 0; evict_data.target = npages; - hfi1_mmu_rb_evict(pq->handler, &evict_data); + hfi1_mmu_rb_evict(handler, &evict_data); return evict_data.cleared; } -static int pin_sdma_pages(struct user_sdma_request *req, - struct user_sdma_iovec *iovec, - struct sdma_mmu_node *node, - int npages) -{ - int pinned, cleared; - struct page **pages; - struct hfi1_user_sdma_pkt_q *pq = req->pq; - - pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); - if (!pages) - return -ENOMEM; - memcpy(pages, node->pages, node->npages * sizeof(*pages)); - - npages -= node->npages; -retry: - if (!hfi1_can_pin_pages(pq->dd, current->mm, - atomic_read(&pq->n_locked), npages)) { - cleared = sdma_cache_evict(pq, npages); - if (cleared >= npages) - goto retry; - } - pinned = hfi1_acquire_user_pages(current->mm, - ((unsigned long)iovec->iov.iov_base + - (node->npages * PAGE_SIZE)), npages, 0, - pages + node->npages); - if (pinned < 0) { - kfree(pages); - return pinned; - } - if (pinned != npages) { - unpin_vector_pages(current->mm, pages, node->npages, pinned); - return -EFAULT; - } - kfree(node->pages); - node->rb.len = iovec->iov.iov_len; - node->pages = pages; - atomic_add(pinned, &pq->n_locked); - return pinned; -} - -static void unpin_sdma_pages(struct sdma_mmu_node *node) -{ - if (node->npages) { - unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0, - node->npages); - atomic_sub(node->npages, &node->pq->n_locked); - } -} - -static int pin_vector_pages(struct user_sdma_request *req, - struct user_sdma_iovec *iovec) -{ - int ret = 0, pinned, npages; - struct hfi1_user_sdma_pkt_q *pq = req->pq; - struct sdma_mmu_node *node = NULL; - struct mmu_rb_node *rb_node; - struct iovec *iov; - bool extracted; - - extracted = - hfi1_mmu_rb_remove_unless_exact(pq->handler, - (unsigned long) - iovec->iov.iov_base, - iovec->iov.iov_len, &rb_node); - if (rb_node) { - node = container_of(rb_node, struct sdma_mmu_node, rb); - if (!extracted) { - atomic_inc(&node->refcount); - iovec->pages = node->pages; - iovec->npages = node->npages; - iovec->node = node; - return 0; - } - } - - if (!node) { - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (!node) - return -ENOMEM; - - node->rb.addr = (unsigned long)iovec->iov.iov_base; - node->pq = pq; - atomic_set(&node->refcount, 0); - } - - iov = &iovec->iov; - npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len); - if (node->npages < npages) { - pinned = pin_sdma_pages(req, iovec, node, npages); - if (pinned < 0) { - ret = pinned; - goto bail; - } - node->npages += pinned; - npages = node->npages; - } - iovec->pages = node->pages; - iovec->npages = npages; - iovec->node = node; - - ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb); - if (ret) { - iovec->node = NULL; - goto bail; - } - return 0; -bail: - unpin_sdma_pages(node); - kfree(node); - 
return ret; -} - -static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, - unsigned start, unsigned npages) -{ - hfi1_release_user_pages(mm, pages + start, npages, false); - kfree(pages); -} - static int check_header_template(struct user_sdma_request *req, struct hfi1_pkt_header *hdr, u32 lrhlen, u32 datalen) @@ -1388,7 +1216,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) if (req->seqcomp != req->info.npkts - 1) return; - user_sdma_free_request(req, false); + user_sdma_free_request(req); set_comp_state(pq, cq, req->info.comp_idx, state, status); pq_update(pq); } @@ -1399,10 +1227,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq) wake_up(&pq->wait); } -static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) +static void user_sdma_free_request(struct user_sdma_request *req) { - int i; - if (!list_empty(&req->txps)) { struct sdma_txreq *t, *p; @@ -1415,21 +1241,6 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) } } - for (i = 0; i < req->data_iovs; i++) { - struct sdma_mmu_node *node = req->iovs[i].node; - - if (!node) - continue; - - req->iovs[i].node = NULL; - - if (unpin) - hfi1_mmu_rb_remove(req->pq->handler, - &node->rb); - else - atomic_dec(&node->refcount); - } - kfree(req->tids); clear_bit(req->info.comp_idx, req->pq->req_in_use); } @@ -1447,6 +1258,368 @@ static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq, idx, state, ret); } +static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, + unsigned int start, unsigned int npages) +{ + hfi1_release_user_pages(mm, pages + start, npages, false); + kfree(pages); +} + +static void free_system_node(struct sdma_mmu_node *node) +{ + if (node->npages) { + unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0, + node->npages); + atomic_sub(node->npages, &node->pq->n_locked); + } + kfree(node); +} + +static inline void acquire_node(struct sdma_mmu_node *node) +{ + atomic_inc(&node->refcount); + WARN_ON(atomic_read(&node->refcount) < 0); +} + +static inline void release_node(struct mmu_rb_handler *handler, + struct sdma_mmu_node *node) +{ + atomic_dec(&node->refcount); + WARN_ON(atomic_read(&node->refcount) < 0); +} + +static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler, + unsigned long start, + unsigned long end) +{ + struct mmu_rb_node *rb_node; + struct sdma_mmu_node *node; + unsigned long flags; + + spin_lock_irqsave(&handler->lock, flags); + rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start)); + if (!rb_node) { + spin_unlock_irqrestore(&handler->lock, flags); + return NULL; + } + node = container_of(rb_node, struct sdma_mmu_node, rb); + acquire_node(node); + spin_unlock_irqrestore(&handler->lock, flags); + + return node; +} + +static int pin_system_pages(struct user_sdma_request *req, + uintptr_t start_address, size_t length, + struct sdma_mmu_node *node, int npages) +{ + struct hfi1_user_sdma_pkt_q *pq = req->pq; + int pinned, cleared; + struct page **pages; + + pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); + if (!pages) + return -ENOMEM; + +retry: + if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked), + npages)) { + SDMA_DBG(req, "Evicting: nlocked %u npages %u", + atomic_read(&pq->n_locked), npages); + cleared = sdma_cache_evict(pq, npages); + if (cleared >= npages) + goto retry; + } + + SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u", + start_address, node->npages, npages); + pinned = 
hfi1_acquire_user_pages(current->mm, start_address, npages, 0, + pages); + + if (pinned < 0) { + kfree(pages); + SDMA_DBG(req, "pinned %d", pinned); + return pinned; + } + if (pinned != npages) { + unpin_vector_pages(current->mm, pages, node->npages, pinned); + SDMA_DBG(req, "npages %u pinned %d", npages, pinned); + return -EFAULT; + } + node->rb.addr = start_address; + node->rb.len = length; + node->pages = pages; + node->npages = npages; + atomic_add(pinned, &pq->n_locked); + SDMA_DBG(req, "done. pinned %d", pinned); + return 0; +} + +static int add_system_pinning(struct user_sdma_request *req, + struct sdma_mmu_node **node_p, + unsigned long start, unsigned long len) + +{ + struct hfi1_user_sdma_pkt_q *pq = req->pq; + struct sdma_mmu_node *node; + int ret; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + node->pq = pq; + ret = pin_system_pages(req, start, len, node, PFN_DOWN(len)); + if (ret == 0) { + ret = hfi1_mmu_rb_insert(pq->handler, &node->rb); + if (ret) + free_system_node(node); + else + *node_p = node; + + return ret; + } + + kfree(node); + return ret; +} + +static int get_system_cache_entry(struct user_sdma_request *req, + struct sdma_mmu_node **node_p, + size_t req_start, size_t req_len) +{ + struct hfi1_user_sdma_pkt_q *pq = req->pq; + u64 start = ALIGN_DOWN(req_start, PAGE_SIZE); + u64 end = PFN_ALIGN(req_start + req_len); + struct mmu_rb_handler *handler = pq->handler; + int ret; + + if ((end - start) == 0) { + SDMA_DBG(req, + "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx", + req_start, req_len, start, end); + return -EINVAL; + } + + SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len); + + while (1) { + struct sdma_mmu_node *node = + find_system_node(handler, start, end); + u64 prepend_len = 0; + + SDMA_DBG(req, "node %p start %llx end %llu", node, start, end); + if (!node) { + ret = add_system_pinning(req, node_p, start, + end - start); + if (ret == -EEXIST) { + /* + * Another execution context has inserted a + * conflicting entry first. + */ + continue; + } + return ret; + } + + if (node->rb.addr <= start) { + /* + * This entry covers at least part of the region. If it doesn't extend + * to the end, then this will be called again for the next segment. + */ + *node_p = node; + return 0; + } + + SDMA_DBG(req, "prepend: node->rb.addr %lx, node->refcount %d", + node->rb.addr, atomic_read(&node->refcount)); + prepend_len = node->rb.addr - start; + + /* + * This node will not be returned; instead, a new node + * will be. So release the reference. + */ + release_node(handler, node); + + /* Prepend a node to cover the beginning of the allocation */ + ret = add_system_pinning(req, node_p, start, prepend_len); + if (ret == -EEXIST) { + /* Another execution context has inserted a conflicting entry first. */ + continue; + } + return ret; + } +} + +static int add_mapping_to_sdma_packet(struct user_sdma_request *req, + struct user_sdma_txreq *tx, + struct sdma_mmu_node *cache_entry, + size_t start, + size_t from_this_cache_entry) +{ + struct hfi1_user_sdma_pkt_q *pq = req->pq; + unsigned int page_offset; + unsigned int from_this_page; + size_t page_index; + void *ctx; + int ret; + + /* + * Because the cache may be more fragmented than the memory that is being accessed, + * it's not strictly necessary to have a descriptor per cache entry. 
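get_system_cache_entry() above loops until the start of the requested range is covered: a miss pins the whole range, a hit whose node begins past 'start' pins just the gap in front of it (the prepend), and -EEXIST from a racing insert restarts the search. A simplified, self-contained sketch of the same decision loop; the array-backed cache and pin_and_insert() are illustrative stand-ins for the driver's interval rb-tree and pinning code:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct entry { unsigned long addr, len; int used; };

static struct entry cache[8];	/* stand-in for the interval rb-tree */

/* Lowest-addressed cached entry overlapping [start, end) */
static struct entry *find_first_overlap(unsigned long start, unsigned long end)
{
	struct entry *best = NULL;

	for (size_t i = 0; i < 8; i++) {
		struct entry *e = &cache[i];

		if (e->used && e->addr < end && start < e->addr + e->len &&
		    (!best || e->addr < best->addr))
			best = e;
	}
	return best;
}

/* Stand-in for pinning pages and inserting the node (no racing -EEXIST) */
static int pin_and_insert(struct entry **out, unsigned long start,
			  unsigned long len)
{
	for (size_t i = 0; i < 8; i++) {
		if (!cache[i].used) {
			cache[i] = (struct entry){ start, len, 1 };
			*out = &cache[i];
			return 0;
		}
	}
	return -ENOMEM;
}

/* Return an entry covering 'start'; pin a missing or prepended range */
static int get_cache_entry(struct entry **out, unsigned long start,
			   unsigned long end)
{
	for (;;) {
		struct entry *e = find_first_overlap(start, end);
		int ret;

		if (!e) {
			/* miss: pin the whole range */
			ret = pin_and_insert(out, start, end - start);
		} else if (e->addr <= start) {
			/* hit covering the start of the range */
			*out = e;
			return 0;
		} else {
			/* gap in front of the entry: pin the prepend */
			ret = pin_and_insert(out, start, e->addr - start);
		}

		if (ret != -EEXIST)
			return ret;
		/* a racing context inserted a conflicting entry: retry */
	}
}

int main(void)
{
	struct entry *e;

	pin_and_insert(&e, 0x3000, 0x1000);	/* pre-existing entry */
	get_cache_entry(&e, 0x1000, 0x4000);	/* pins the prepend */
	printf("addr=%#lx len=%#lx\n", e->addr, e->len);	/* 0x1000, 0x2000 */
	return 0;
}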
+ */ + + while (from_this_cache_entry) { + page_index = PFN_DOWN(start - cache_entry->rb.addr); + + if (page_index >= cache_entry->npages) { + SDMA_DBG(req, + "Request for page_index %zu >= cache_entry->npages %u", + page_index, cache_entry->npages); + return -EINVAL; + } + + page_offset = start - ALIGN_DOWN(start, PAGE_SIZE); + from_this_page = PAGE_SIZE - page_offset; + + if (from_this_page < from_this_cache_entry) { + ctx = NULL; + } else { + /* + * In the case they are equal the next line has no practical effect, + * but it's better to do a register to register copy than a conditional + * branch. + */ + from_this_page = from_this_cache_entry; + ctx = cache_entry; + } + + ret = sdma_txadd_page(pq->dd, ctx, &tx->txreq, + cache_entry->pages[page_index], + page_offset, from_this_page); + if (ret) { + /* + * When there's a failure, the entire request is freed by + * user_sdma_send_pkts(). + */ + SDMA_DBG(req, + "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u", + ret, page_index, page_offset, from_this_page); + return ret; + } + start += from_this_page; + from_this_cache_entry -= from_this_page; + } + return 0; +} + +static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req, + struct user_sdma_txreq *tx, + struct user_sdma_iovec *iovec, + size_t from_this_iovec) +{ + struct mmu_rb_handler *handler = req->pq->handler; + + while (from_this_iovec > 0) { + struct sdma_mmu_node *cache_entry; + size_t from_this_cache_entry; + size_t start; + int ret; + + start = (uintptr_t)iovec->iov.iov_base + iovec->offset; + ret = get_system_cache_entry(req, &cache_entry, start, + from_this_iovec); + if (ret) { + SDMA_DBG(req, "pin system segment failed %d", ret); + return ret; + } + + from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr); + if (from_this_cache_entry > from_this_iovec) + from_this_cache_entry = from_this_iovec; + + ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start, + from_this_cache_entry); + if (ret) { + /* + * We're guaranteed that there will be no descriptor + * completion callback that releases this node + * because only the last descriptor referencing it + * has a context attached, and a failure means the + * last descriptor was never added. + */ + release_node(handler, cache_entry); + SDMA_DBG(req, "add system segment failed %d", ret); + return ret; + } + + iovec->offset += from_this_cache_entry; + from_this_iovec -= from_this_cache_entry; + } + + return 0; +} + +static int add_system_pages_to_sdma_packet(struct user_sdma_request *req, + struct user_sdma_txreq *tx, + struct user_sdma_iovec *iovec, + u32 *pkt_data_remaining) +{ + size_t remaining_to_add = *pkt_data_remaining; + /* + * Walk through iovec entries, ensure the associated pages + * are pinned and mapped, add data to the packet until no more + * data remains to be added. + */ + while (remaining_to_add > 0) { + struct user_sdma_iovec *cur_iovec; + size_t from_this_iovec; + int ret; + + cur_iovec = iovec; + from_this_iovec = iovec->iov.iov_len - iovec->offset; + + if (from_this_iovec > remaining_to_add) { + from_this_iovec = remaining_to_add; + } else { + /* The current iovec entry will be consumed by this pass. 
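add_system_pages_to_sdma_packet() above spreads one packet's payload across consecutive iovecs: each pass takes the smaller of what remains in the current iovec and what remains in the packet, and advances req->iov_idx only when the iovec is fully consumed. A small standalone sketch of that accounting, with illustrative names; in the driver the "take" step maps and adds the bytes to the txreq:

#include <stddef.h>
#include <stdio.h>

struct iov { size_t len, offset; };

static void fill_packet(struct iov *iovs, size_t *iov_idx, size_t remaining)
{
	while (remaining > 0) {
		struct iov *cur = &iovs[*iov_idx];
		size_t take = cur->len - cur->offset;

		if (take > remaining) {
			take = remaining;	/* packet is the limit */
		} else {
			(*iov_idx)++;	/* iovec consumed by this pass */
		}

		cur->offset += take;	/* driver: map/add 'take' bytes */
		remaining -= take;
	}
}

int main(void)
{
	struct iov iovs[2] = { { .len = 100 }, { .len = 4096 } };
	size_t idx = 0;

	fill_packet(iovs, &idx, 1024);	/* payload spans both iovecs */
	printf("idx=%zu off0=%zu off1=%zu\n", idx, iovs[0].offset,
	       iovs[1].offset);	/* prints: idx=1 off0=100 off1=924 */
	return 0;
}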
*/ + req->iov_idx++; + iovec++; + } + + ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec, + from_this_iovec); + if (ret) + return ret; + + remaining_to_add -= from_this_iovec; + } + *pkt_data_remaining = remaining_to_add; + + return 0; +} + +void system_descriptor_complete(struct hfi1_devdata *dd, + struct sdma_desc *descp) +{ + switch (sdma_mapping_type(descp)) { + case SDMA_MAP_SINGLE: + dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp), + sdma_mapping_len(descp), DMA_TO_DEVICE); + break; + case SDMA_MAP_PAGE: + dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp), + sdma_mapping_len(descp), DMA_TO_DEVICE); + break; + } + + if (descp->pinning_ctx) { + struct sdma_mmu_node *node = descp->pinning_ctx; + + release_node(node->rb.handler, node); + } +} + static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr, unsigned long len) { @@ -1493,8 +1666,7 @@ static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode) struct sdma_mmu_node *node = container_of(mnode, struct sdma_mmu_node, rb); - unpin_sdma_pages(node); - kfree(node); + free_system_node(node); } static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode) diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h index ea56eb57e65689..a241836371dc13 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.h +++ b/drivers/infiniband/hw/hfi1/user_sdma.h @@ -112,16 +112,11 @@ struct sdma_mmu_node { struct user_sdma_iovec { struct list_head list; struct iovec iov; - /* number of pages in this vector */ - unsigned int npages; - /* array of pinned pages for this vector */ - struct page **pages; /* * offset into the virtual address space of the vector at * which we last left off. */ u64 offset; - struct sdma_mmu_node *node; }; /* evict operation argument */ diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 7f6d7fc7951df6..fbdcfecb1768cf 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -778,8 +778,8 @@ static int build_verbs_tx_desc( /* add icrc, lt byte, and padding to flit */ if (extra_bytes) - ret = sdma_txadd_daddr(sde->dd, &tx->txreq, - sde->dd->sdma_pad_phys, extra_bytes); + ret = sdma_txadd_daddr(sde->dd, &tx->txreq, sde->dd->sdma_pad_phys, + extra_bytes); bail_txadd: return ret; diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c index c3f0f8d877c370..727eedfba332a9 100644 --- a/drivers/infiniband/hw/hfi1/vnic_sdma.c +++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c @@ -64,6 +64,7 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde, /* combine physically continuous fragments later? 
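system_descriptor_complete() above first DMA-unmaps the descriptor and then, only when a pinning context is attached, drops one reference on the cache node; as the earlier comment notes, only the last descriptor referencing a cache entry carries the context. A toy sketch of that ownership rule; note that a refcount of zero merely makes the node evictable here, since in the driver the pages are actually unpinned later by eviction or invalidation:

#include <stdio.h>

struct pin_ctx { int refcount; };	/* stand-in for sdma_mmu_node */

struct desc {
	struct pin_ctx *pinning_ctx;	/* NULL on all but the last user */
};

static void release_ref(struct pin_ctx *ctx)
{
	if (--ctx->refcount == 0)
		printf("node idle: eligible for cache eviction\n");
}

/* Retirement path: DMA unmap (elided here), then drop the pin reference */
static void descriptor_complete(struct desc *d)
{
	if (d->pinning_ctx)
		release_ref(d->pinning_ctx);
}

int main(void)
{
	struct pin_ctx node = { .refcount = 1 };
	struct desc d1 = { NULL };	/* intermediate descriptor */
	struct desc d2 = { &node };	/* last descriptor using the node */

	descriptor_complete(&d1);	/* no reference to drop */
	descriptor_complete(&d2);	/* node becomes evictable */
	return 0;
}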
*/ ret = sdma_txadd_page(sde->dd, + NULL, &tx->txreq, skb_frag_page(frag), skb_frag_off(frag), diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index dbf97fe5948ff9..84f1167de1d9e7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1960,100 +1960,6 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev) return hns_roce_cmq_send(hr_dev, &desc, 1); } -/* Use default caps when hns_roce_query_pf_caps() failed or init VF profile */ -static void set_default_caps(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_caps *caps = &hr_dev->caps; - - caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM; - caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM; - caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM; - caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM; - caps->min_cqes = HNS_ROCE_MIN_CQE_NUM; - caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM; - caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM; - caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM; - - caps->num_uars = HNS_ROCE_V2_UAR_NUM; - caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM; - caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM; - caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM; - caps->num_comp_vectors = 0; - - caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM; - caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM; - caps->qpc_timer_bt_num = HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM; - caps->cqc_timer_bt_num = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM; - - caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA; - caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA; - caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ; - caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ; - caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ; - caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ; - caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ; - caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ; - caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ; - caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ; - caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED; - caps->reserved_lkey = 0; - caps->reserved_pds = 0; - caps->reserved_mrws = 1; - caps->reserved_uars = 0; - caps->reserved_cqs = 0; - caps->reserved_srqs = 0; - caps->reserved_qps = HNS_ROCE_V2_RSV_QPS; - - caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; - caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; - caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; - caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; - caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM; - - caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM; - caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM; - caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM; - caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM; - caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM; - caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM; - caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM; - caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE; - - caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR | - HNS_ROCE_CAP_FLAG_ROCE_V1_V2 | - HNS_ROCE_CAP_FLAG_CQ_RECORD_DB | - HNS_ROCE_CAP_FLAG_QP_RECORD_DB; - - caps->pkey_table_len[0] = 1; - caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM; - caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM; - caps->local_ca_ack_delay = 0; - caps->max_mtu = IB_MTU_4096; - - caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR; - caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE; - - caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW | - HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR | - HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL; - - caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM; - - if (hr_dev->pci_dev->revision >= 
PCI_REVISION_ID_HIP09) { - caps->flags |= HNS_ROCE_CAP_FLAG_STASH | - HNS_ROCE_CAP_FLAG_DIRECT_WQE | - HNS_ROCE_CAP_FLAG_XRC; - caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE; - } else { - caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE; - - /* The following configuration are only valid for HIP08 */ - caps->qpc_sz = HNS_ROCE_V2_QPC_SZ; - caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ; - caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE; - } -} - static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num, u32 *buf_page_size, u32 *bt_page_size, u32 hem_type) { @@ -2239,7 +2145,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev) set_hem_page_size(hr_dev); } -static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) +static int hns_roce_query_caps(struct hns_roce_dev *hr_dev) { struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM]; struct hns_roce_caps *caps = &hr_dev->caps; @@ -2248,15 +2154,17 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) struct hns_roce_query_pf_caps_c *resp_c; struct hns_roce_query_pf_caps_d *resp_d; struct hns_roce_query_pf_caps_e *resp_e; + enum hns_roce_opcode_type cmd; int ctx_hop_num; int pbl_hop_num; int ret; int i; + cmd = hr_dev->is_vf ? HNS_ROCE_OPC_QUERY_VF_CAPS_NUM : + HNS_ROCE_OPC_QUERY_PF_CAPS_NUM; + for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) { - hns_roce_cmq_setup_basic_desc(&desc[i], - HNS_ROCE_OPC_QUERY_PF_CAPS_NUM, - true); + hns_roce_cmq_setup_basic_desc(&desc[i], cmd, true); if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1)) desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); else @@ -2273,35 +2181,33 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data; resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data; - caps->local_ca_ack_delay = resp_a->local_ca_ack_delay; - caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg); - caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline); - caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg); + caps->local_ca_ack_delay = resp_a->local_ca_ack_delay; + caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg); + caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline); + caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg); caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg); - caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges); + caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges); caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges); - caps->num_aeq_vectors = resp_a->num_aeq_vectors; - caps->num_other_vectors = resp_a->num_other_vectors; - caps->max_sq_desc_sz = resp_a->max_sq_desc_sz; - caps->max_rq_desc_sz = resp_a->max_rq_desc_sz; - caps->cqe_sz = resp_a->cqe_sz; - - caps->mtpt_entry_sz = resp_b->mtpt_entry_sz; - caps->irrl_entry_sz = resp_b->irrl_entry_sz; - caps->trrl_entry_sz = resp_b->trrl_entry_sz; - caps->cqc_entry_sz = resp_b->cqc_entry_sz; - caps->srqc_entry_sz = resp_b->srqc_entry_sz; - caps->idx_entry_sz = resp_b->idx_entry_sz; - caps->sccc_sz = resp_b->sccc_sz; - caps->max_mtu = resp_b->max_mtu; - caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz); - caps->min_cqes = resp_b->min_cqes; - caps->min_wqes = resp_b->min_wqes; - caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap); - caps->pkey_table_len[0] = resp_b->pkey_table_len; - caps->phy_num_uars = resp_b->phy_num_uars; - ctx_hop_num = resp_b->ctx_hop_num; - pbl_hop_num = resp_b->pbl_hop_num; + caps->num_aeq_vectors = resp_a->num_aeq_vectors; + caps->num_other_vectors = resp_a->num_other_vectors; + caps->max_sq_desc_sz = 
resp_a->max_sq_desc_sz; + caps->max_rq_desc_sz = resp_a->max_rq_desc_sz; + + caps->mtpt_entry_sz = resp_b->mtpt_entry_sz; + caps->irrl_entry_sz = resp_b->irrl_entry_sz; + caps->trrl_entry_sz = resp_b->trrl_entry_sz; + caps->cqc_entry_sz = resp_b->cqc_entry_sz; + caps->srqc_entry_sz = resp_b->srqc_entry_sz; + caps->idx_entry_sz = resp_b->idx_entry_sz; + caps->sccc_sz = resp_b->sccc_sz; + caps->max_mtu = resp_b->max_mtu; + caps->min_cqes = resp_b->min_cqes; + caps->min_wqes = resp_b->min_wqes; + caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap); + caps->pkey_table_len[0] = resp_b->pkey_table_len; + caps->phy_num_uars = resp_b->phy_num_uars; + ctx_hop_num = resp_b->ctx_hop_num; + pbl_hop_num = resp_b->pbl_hop_num; caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS); @@ -2324,8 +2230,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH); caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS); caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH); - caps->default_aeq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST); - caps->default_ceq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST); caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS); caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS); caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS); @@ -2336,10 +2240,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS); caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS); caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS); - caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt); - caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period); - caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt); - caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period); caps->qpc_hop_num = ctx_hop_num; caps->sccc_hop_num = ctx_hop_num; @@ -2357,6 +2257,19 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) if (!(caps->page_size_cap & PAGE_SIZE)) caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED; + if (!hr_dev->is_vf) { + caps->cqe_sz = resp_a->cqe_sz; + caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz); + caps->default_aeq_arm_st = + hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST); + caps->default_ceq_arm_st = + hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST); + caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt); + caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period); + caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt); + caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period); + } + return 0; } @@ -2404,7 +2317,11 @@ static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev) hr_dev->func_num = 1; - set_default_caps(hr_dev); + ret = hns_roce_query_caps(hr_dev); + if (ret) { + dev_err(dev, "failed to query VF caps, ret = %d.\n", ret); + return ret; + } ret = hns_roce_query_vf_resource(hr_dev); if (ret) { @@ -2444,9 +2361,11 @@ static int hns_roce_v2_pf_profile(struct hns_roce_dev *hr_dev) return ret; } - ret = hns_roce_query_pf_caps(hr_dev); - if (ret) - set_default_caps(hr_dev); + ret = hns_roce_query_caps(hr_dev); + if (ret) { + dev_err(dev, "failed to query PF caps, ret = %d.\n", ret); + return ret; + } ret = hns_roce_query_pf_resource(hr_dev); if (ret) { diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index af9d00225cdf54..1b44d2434ab415 100644 --- 
a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -35,43 +35,15 @@ #include -#define HNS_ROCE_V2_MAX_QP_NUM 0x1000 -#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000 -#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000 -#define HNS_ROCE_V2_MAX_SRQ_SGE 64 -#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000 -#define HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM 0x100 -#define HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM 0x100 -#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000 -#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000 -#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64 -#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64 -#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20 -#define HNS_ROCE_V3_MAX_SQ_INLINE 0x400 #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32 -#define HNS_ROCE_V2_UAR_NUM 256 -#define HNS_ROCE_V2_PHY_UAR_NUM 1 +#define HNS_ROCE_V2_MTT_ENTRY_SZ 64 #define HNS_ROCE_V2_AEQE_VEC_NUM 1 #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1 -#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000 #define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000 -#define HNS_ROCE_V2_MAX_PD_NUM 0x1000000 #define HNS_ROCE_V2_MAX_XRCD_NUM 0x1000000 #define HNS_ROCE_V2_RSV_XRCD_NUM 0 -#define HNS_ROCE_V2_MAX_QP_INIT_RDMA 128 -#define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128 -#define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64 -#define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16 -#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64 -#define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100 -#define HNS_ROCE_V2_CQC_ENTRY_SZ 64 -#define HNS_ROCE_V2_SRQC_ENTRY_SZ 64 -#define HNS_ROCE_V2_MTPT_ENTRY_SZ 64 -#define HNS_ROCE_V2_MTT_ENTRY_SZ 64 -#define HNS_ROCE_V2_IDX_ENTRY_SZ 4 -#define HNS_ROCE_V2_SCCC_SZ 32 #define HNS_ROCE_V3_SCCC_SZ 64 #define HNS_ROCE_V3_GMV_ENTRY_SZ 32 @@ -232,6 +204,7 @@ enum hns_roce_opcode_type { HNS_ROCE_OPC_QUERY_FUNC_INFO = 0x8407, HNS_ROCE_OPC_QUERY_PF_CAPS_NUM = 0x8408, HNS_ROCE_OPC_CFG_ENTRY_SIZE = 0x8409, + HNS_ROCE_OPC_QUERY_VF_CAPS_NUM = 0x8410, HNS_ROCE_OPC_CFG_SGID_TB = 0x8500, HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501, HNS_ROCE_OPC_POST_MB = 0x8504, diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c index 8817864154af1f..70009b970e08b0 100644 --- a/drivers/infiniband/hw/irdma/cm.c +++ b/drivers/infiniband/hw/irdma/cm.c @@ -337,7 +337,7 @@ static struct irdma_puda_buf *irdma_form_ah_cm_frame(struct irdma_cm_node *cm_no pktsize = sizeof(*tcph) + opts_len + hdr_len + pd_len; - memset(buf, 0, pktsize); + memset(buf, 0, sizeof(*tcph)); sqbuf->totallen = pktsize; sqbuf->tcphlen = sizeof(*tcph) + opts_len; diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c index a41e0d21143ae7..d88c9184007ea1 100644 --- a/drivers/infiniband/hw/irdma/ctrl.c +++ b/drivers/infiniband/hw/irdma/ctrl.c @@ -1867,8 +1867,6 @@ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi, vsi->mtu = info->params->mtu; vsi->exception_lan_q = info->exception_lan_q; vsi->vsi_idx = info->pf_data_vsi_num; - if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) - vsi->fcn_id = info->dev->hmc_fn_id; irdma_set_qos_info(vsi, info->params); for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { @@ -1887,32 +1885,56 @@ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi, } /** - * irdma_get_fcn_id - Return the function id + * irdma_get_stats_idx - Return stats index * @vsi: pointer to the vsi */ -static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi) +static u8 irdma_get_stats_idx(struct irdma_sc_vsi *vsi) { struct irdma_stats_inst_info stats_info = {}; struct irdma_sc_dev *dev = vsi->dev; - u8 fcn_id = IRDMA_INVALID_FCN_ID; - u8 start_idx, max_stats, i; + u8 i; - if (dev->hw_attrs.uk_attrs.hw_rev != 
IRDMA_GEN_1) { + if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE, &stats_info)) return stats_info.stats_idx; } - start_idx = 1; - max_stats = 16; - for (i = start_idx; i < max_stats; i++) - if (!dev->fcn_id_array[i]) { - fcn_id = i; - dev->fcn_id_array[i] = true; - break; + for (i = 0; i < IRDMA_MAX_STATS_COUNT_GEN_1; i++) { + if (!dev->stats_idx_array[i]) { + dev->stats_idx_array[i] = true; + return i; } + } - return fcn_id; + return IRDMA_INVALID_STATS_IDX; +} + +/** + * irdma_hw_stats_init_gen1 - Initialize stat reg table used for gen1 + * @vsi: vsi structure where hw_regs are set + * + * Populate the HW stats table + */ +static void irdma_hw_stats_init_gen1(struct irdma_sc_vsi *vsi) +{ + struct irdma_sc_dev *dev = vsi->dev; + const struct irdma_hw_stat_map *map; + u64 *stat_reg = vsi->hw_stats_regs; + u64 *regs = dev->hw_stats_regs; + u16 i, stats_reg_set = vsi->stats_idx; + + map = dev->hw_stats_map; + + /* First 4 stat instances are reserved for port level statistics. */ + stats_reg_set += vsi->stats_inst_alloc ? IRDMA_FIRST_NON_PF_STAT : 0; + + for (i = 0; i < dev->hw_attrs.max_stat_idx; i++) { + if (map[i].bitmask <= IRDMA_MAX_STATS_32) + stat_reg[i] = regs[i] + stats_reg_set * sizeof(u32); + else + stat_reg[i] = regs[i] + stats_reg_set * sizeof(u64); + } } /** @@ -1923,7 +1945,6 @@ static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi) int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi, struct irdma_vsi_stats_info *info) { - u8 fcn_id = info->fcn_id; struct irdma_dma_mem *stats_buff_mem; vsi->pestat = info->pestat; @@ -1944,26 +1965,24 @@ int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi, IRDMA_GATHER_STATS_BUF_SIZE); irdma_hw_stats_start_timer(vsi); - if (info->alloc_fcn_id) - fcn_id = irdma_get_fcn_id(vsi); - if (fcn_id == IRDMA_INVALID_FCN_ID) - goto stats_error; - - vsi->stats_fcn_id_alloc = info->alloc_fcn_id; - vsi->fcn_id = fcn_id; - if (info->alloc_fcn_id) { - vsi->pestat->gather_info.use_stats_inst = true; - vsi->pestat->gather_info.stats_inst_index = fcn_id; - } - return 0; + /* when stat allocation is not required default to fcn_id. 
*/ + vsi->stats_idx = info->fcn_id; + if (info->alloc_stats_inst) { + u8 stats_idx = irdma_get_stats_idx(vsi); -stats_error: - dma_free_coherent(vsi->pestat->hw->device, stats_buff_mem->size, - stats_buff_mem->va, stats_buff_mem->pa); - stats_buff_mem->va = NULL; + if (stats_idx != IRDMA_INVALID_STATS_IDX) { + vsi->stats_inst_alloc = true; + vsi->stats_idx = stats_idx; + vsi->pestat->gather_info.use_stats_inst = true; + vsi->pestat->gather_info.stats_inst_index = stats_idx; + } + } + + if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) + irdma_hw_stats_init_gen1(vsi); - return -EIO; + return 0; } /** @@ -1973,19 +1992,19 @@ int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi, void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi) { struct irdma_stats_inst_info stats_info = {}; - u8 fcn_id = vsi->fcn_id; struct irdma_sc_dev *dev = vsi->dev; + u8 stats_idx = vsi->stats_idx; - if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) { - if (vsi->stats_fcn_id_alloc) { - stats_info.stats_idx = vsi->fcn_id; + if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { + if (vsi->stats_inst_alloc) { + stats_info.stats_idx = vsi->stats_idx; irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE, &stats_info); } } else { - if (vsi->stats_fcn_id_alloc && - fcn_id < vsi->dev->hw_attrs.max_stat_inst) - vsi->dev->fcn_id_array[fcn_id] = false; + if (vsi->stats_inst_alloc && + stats_idx < vsi->dev->hw_attrs.max_stat_inst) + vsi->dev->stats_idx_array[stats_idx] = false; } if (!vsi->pestat) @@ -5297,7 +5316,8 @@ void sc_vsi_update_stats(struct irdma_sc_vsi *vsi) gather_stats = vsi->pestat->gather_info.gather_stats_va; last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va; irdma_update_stats(&vsi->pestat->hw_stats, gather_stats, - last_gather_stats); + last_gather_stats, vsi->dev->hw_stats_map, + vsi->dev->hw_attrs.max_stat_idx); } /** @@ -5404,186 +5424,62 @@ int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev, return ret_code; } +/** + * irdma_stat_val - Extract HW counter value from statistics buffer + * @stats_val: pointer to statistics buffer + * @byteoff: byte offset of counter value in the buffer (8B-aligned) + * @bitoff: bit offset of counter value within 8B entry + * @bitmask: maximum counter value (e.g. 0xffffff for 24-bit counter) + */ +static inline u64 irdma_stat_val(const u64 *stats_val, u16 byteoff, u8 bitoff, + u64 bitmask) +{ + u16 idx = byteoff / sizeof(*stats_val); + + return (stats_val[idx] >> bitoff) & bitmask; +} + +/** + * irdma_stat_delta - Calculate counter delta + * @new_val: updated counter value + * @old_val: last counter value + * @max_val: maximum counter value (e.g. 
0xffffff for 24-bit counter) + */ +static inline u64 irdma_stat_delta(u64 new_val, u64 old_val, u64 max_val) +{ + if (new_val >= old_val) + return new_val - old_val; + + /* roll-over case */ + return max_val - old_val + new_val + 1; +} + /** * irdma_update_stats - Update statistics * @hw_stats: hw_stats instance to update * @gather_stats: updated stat counters * @last_gather_stats: last stat counters + * @map: HW stat map (hw_stats => gather_stats) + * @max_stat_idx: number of HW stats */ void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats, struct irdma_gather_stats *gather_stats, - struct irdma_gather_stats *last_gather_stats) -{ - u64 *stats_val = hw_stats->stats_val_32; - - stats_val[IRDMA_HW_STAT_INDEX_RXVLANERR] += - IRDMA_STATS_DELTA(gather_stats->rxvlanerr, - last_gather_stats->rxvlanerr, - IRDMA_MAX_STATS_32); - stats_val[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] += - IRDMA_STATS_DELTA(gather_stats->ip4rxdiscard, - last_gather_stats->ip4rxdiscard, - IRDMA_MAX_STATS_32); - stats_val[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] += - IRDMA_STATS_DELTA(gather_stats->ip4rxtrunc, - last_gather_stats->ip4rxtrunc, - IRDMA_MAX_STATS_32); - stats_val[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] += - IRDMA_STATS_DELTA(gather_stats->ip4txnoroute, - last_gather_stats->ip4txnoroute, - IRDMA_MAX_STATS_32); - stats_val[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] += - IRDMA_STATS_DELTA(gather_stats->ip6rxdiscard, - last_gather_stats->ip6rxdiscard, - IRDMA_MAX_STATS_32); - stats_val[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] += - IRDMA_STATS_DELTA(gather_stats->ip6rxtrunc, - last_gather_stats->ip6rxtrunc, - IRDMA_MAX_STATS_32); - stats_val[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] += - IRDMA_STATS_DELTA(gather_stats->ip6txnoroute, - last_gather_stats->ip6txnoroute, - IRDMA_MAX_STATS_32); - stats_val[IRDMA_HW_STAT_INDEX_TCPRTXSEG] += - IRDMA_STATS_DELTA(gather_stats->tcprtxseg, - last_gather_stats->tcprtxseg, - IRDMA_MAX_STATS_32); - stats_val[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] += - IRDMA_STATS_DELTA(gather_stats->tcprxopterr, - last_gather_stats->tcprxopterr, - IRDMA_MAX_STATS_32); - stats_val[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] += - IRDMA_STATS_DELTA(gather_stats->tcprxprotoerr, - last_gather_stats->tcprxprotoerr, - IRDMA_MAX_STATS_32); - stats_val[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] += - IRDMA_STATS_DELTA(gather_stats->rxrpcnphandled, - last_gather_stats->rxrpcnphandled, - IRDMA_MAX_STATS_32); - stats_val[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] += - IRDMA_STATS_DELTA(gather_stats->rxrpcnpignored, - last_gather_stats->rxrpcnpignored, - IRDMA_MAX_STATS_32); - stats_val[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] += - IRDMA_STATS_DELTA(gather_stats->txnpcnpsent, - last_gather_stats->txnpcnpsent, - IRDMA_MAX_STATS_32); - stats_val = hw_stats->stats_val_64; - stats_val[IRDMA_HW_STAT_INDEX_IP4RXOCTS] += - IRDMA_STATS_DELTA(gather_stats->ip4rxocts, - last_gather_stats->ip4rxocts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP4RXPKTS] += - IRDMA_STATS_DELTA(gather_stats->ip4rxpkts, - last_gather_stats->ip4rxpkts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] += - IRDMA_STATS_DELTA(gather_stats->ip4txfrag, - last_gather_stats->ip4txfrag, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] += - IRDMA_STATS_DELTA(gather_stats->ip4rxmcpkts, - last_gather_stats->ip4rxmcpkts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP4TXOCTS] += - IRDMA_STATS_DELTA(gather_stats->ip4txocts, - last_gather_stats->ip4txocts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP4TXPKTS] += - IRDMA_STATS_DELTA(gather_stats->ip4txpkts, - 
last_gather_stats->ip4txpkts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] += - IRDMA_STATS_DELTA(gather_stats->ip4txfrag, - last_gather_stats->ip4txfrag, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] += - IRDMA_STATS_DELTA(gather_stats->ip4txmcpkts, - last_gather_stats->ip4txmcpkts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP6RXOCTS] += - IRDMA_STATS_DELTA(gather_stats->ip6rxocts, - last_gather_stats->ip6rxocts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP6RXPKTS] += - IRDMA_STATS_DELTA(gather_stats->ip6rxpkts, - last_gather_stats->ip6rxpkts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] += - IRDMA_STATS_DELTA(gather_stats->ip6txfrags, - last_gather_stats->ip6txfrags, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] += - IRDMA_STATS_DELTA(gather_stats->ip6rxmcpkts, - last_gather_stats->ip6rxmcpkts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP6TXOCTS] += - IRDMA_STATS_DELTA(gather_stats->ip6txocts, - last_gather_stats->ip6txocts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP6TXPKTS] += - IRDMA_STATS_DELTA(gather_stats->ip6txpkts, - last_gather_stats->ip6txpkts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] += - IRDMA_STATS_DELTA(gather_stats->ip6txfrags, - last_gather_stats->ip6txfrags, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] += - IRDMA_STATS_DELTA(gather_stats->ip6txmcpkts, - last_gather_stats->ip6txmcpkts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_TCPRXSEGS] += - IRDMA_STATS_DELTA(gather_stats->tcprxsegs, - last_gather_stats->tcprxsegs, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_TCPTXSEG] += - IRDMA_STATS_DELTA(gather_stats->tcptxsegs, - last_gather_stats->tcptxsegs, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_RDMARXRDS] += - IRDMA_STATS_DELTA(gather_stats->rdmarxrds, - last_gather_stats->rdmarxrds, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_RDMARXSNDS] += - IRDMA_STATS_DELTA(gather_stats->rdmarxsnds, - last_gather_stats->rdmarxsnds, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_RDMARXWRS] += - IRDMA_STATS_DELTA(gather_stats->rdmarxwrs, - last_gather_stats->rdmarxwrs, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_RDMATXRDS] += - IRDMA_STATS_DELTA(gather_stats->rdmatxrds, - last_gather_stats->rdmatxrds, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_RDMATXSNDS] += - IRDMA_STATS_DELTA(gather_stats->rdmatxsnds, - last_gather_stats->rdmatxsnds, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_RDMATXWRS] += - IRDMA_STATS_DELTA(gather_stats->rdmatxwrs, - last_gather_stats->rdmatxwrs, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_RDMAVBND] += - IRDMA_STATS_DELTA(gather_stats->rdmavbn, - last_gather_stats->rdmavbn, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_RDMAVINV] += - IRDMA_STATS_DELTA(gather_stats->rdmavinv, - last_gather_stats->rdmavinv, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_UDPRXPKTS] += - IRDMA_STATS_DELTA(gather_stats->udprxpkts, - last_gather_stats->udprxpkts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_UDPTXPKTS] += - IRDMA_STATS_DELTA(gather_stats->udptxpkts, - last_gather_stats->udptxpkts, - IRDMA_MAX_STATS_48); - stats_val[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] += - IRDMA_STATS_DELTA(gather_stats->rxnpecnmrkpkts, - last_gather_stats->rxnpecnmrkpkts, - IRDMA_MAX_STATS_48); + struct irdma_gather_stats *last_gather_stats, + const struct irdma_hw_stat_map *map, u16 
max_stat_idx) +{ + u64 *stats_val = hw_stats->stats_val; + u16 i; + + for (i = 0; i < max_stat_idx; i++) { + u64 new_val = irdma_stat_val(gather_stats->val, map[i].byteoff, + map[i].bitoff, map[i].bitmask); + u64 last_val = irdma_stat_val(last_gather_stats->val, + map[i].byteoff, map[i].bitoff, + map[i].bitmask); + + stats_val[i] += + irdma_stat_delta(new_val, last_val, map[i].bitmask); + } + memcpy(last_gather_stats, gather_stats, sizeof(*last_gather_stats)); } diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h index c1906cab5c8ad1..6014b9d06a9ba3 100644 --- a/drivers/infiniband/hw/irdma/defs.h +++ b/drivers/infiniband/hw/irdma/defs.h @@ -36,6 +36,7 @@ enum irdma_protocol_used { #define IRDMA_QP_STATE_ERROR 6 #define IRDMA_MAX_TRAFFIC_CLASS 8 +#define IRDMA_MAX_STATS_COUNT_GEN_1 12 #define IRDMA_MAX_USER_PRIORITY 8 #define IRDMA_MAX_APPS 8 #define IRDMA_MAX_STATS_COUNT 128 @@ -365,9 +366,11 @@ enum irdma_cqp_op_type { #define FLD_RS_32(dev, val, field) \ ((u64)((val) & (dev)->hw_masks[field ## _M]) >> (dev)->hw_shifts[field ## _S]) -#define IRDMA_STATS_DELTA(a, b, c) ((a) >= (b) ? (a) - (b) : (a) + (c) - (b)) -#define IRDMA_MAX_STATS_32 0xFFFFFFFFULL -#define IRDMA_MAX_STATS_48 0xFFFFFFFFFFFFULL +#define IRDMA_MAX_STATS_24 0xffffffULL +#define IRDMA_MAX_STATS_32 0xffffffffULL +#define IRDMA_MAX_STATS_48 0xffffffffffffULL +#define IRDMA_MAX_STATS_56 0xffffffffffffffULL +#define IRDMA_MAX_STATS_64 0xffffffffffffffffULL #define IRDMA_MAX_CQ_READ_THRESH 0x3FFFF #define IRDMA_CQPSQ_QHASH_VLANID GENMASK_ULL(43, 32) diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c index 43dfa4761f0698..795f7fd4f25748 100644 --- a/drivers/infiniband/hw/irdma/hw.c +++ b/drivers/infiniband/hw/irdma/hw.c @@ -1092,14 +1092,19 @@ static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, int status; if (rf->msix_shared && !ceq_id) { + snprintf(msix_vec->name, sizeof(msix_vec->name) - 1, + "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev)); tasklet_setup(&rf->dpc_tasklet, irdma_dpc); status = request_irq(msix_vec->irq, irdma_irq_handler, 0, - "AEQCEQ", rf); + msix_vec->name, rf); } else { + snprintf(msix_vec->name, sizeof(msix_vec->name) - 1, + "irdma-%s-CEQ-%d", + dev_name(&rf->pcidev->dev), ceq_id); tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc); status = request_irq(msix_vec->irq, irdma_ceq_handler, 0, - "CEQ", iwceq); + msix_vec->name, iwceq); } cpumask_clear(&msix_vec->mask); cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask); @@ -1128,9 +1133,11 @@ static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf) u32 ret = 0; if (!rf->msix_shared) { + snprintf(msix_vec->name, sizeof(msix_vec->name) - 1, + "irdma-%s-AEQ", dev_name(&rf->pcidev->dev)); tasklet_setup(&rf->dpc_tasklet, irdma_dpc); ret = request_irq(msix_vec->irq, irdma_irq_handler, 0, - "irdma", rf); + msix_vec->name, rf); } if (ret) { ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n"); @@ -1904,8 +1911,8 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf) break; rf->init_state = CEQ0_CREATED; /* Handles processing of CQP completions */ - rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq", - WQ_HIGHPRI | WQ_UNBOUND); + rf->cqp_cmpl_wq = + alloc_ordered_workqueue("cqp_cmpl_wq", WQ_HIGHPRI); if (!rf->cqp_cmpl_wq) { status = -ENOMEM; break; diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c index 50299f58b6b319..37a40fb4d0d7e8 100644 --- a/drivers/infiniband/hw/irdma/i40iw_hw.c +++ 
b/drivers/infiniband/hw/irdma/i40iw_hw.c @@ -32,7 +32,7 @@ static u32 i40iw_regs[IRDMA_MAX_REGS] = { 0xffffffff /* PFINT_RATEN not used in FPK */ }; -static u32 i40iw_stat_offsets_32[IRDMA_HW_STAT_INDEX_MAX_32] = { +static u32 i40iw_stat_offsets[] = { I40E_GLPES_PFIP4RXDISCARD(0), I40E_GLPES_PFIP4RXTRUNC(0), I40E_GLPES_PFIP4TXNOROUTE(0), @@ -42,10 +42,8 @@ static u32 i40iw_stat_offsets_32[IRDMA_HW_STAT_INDEX_MAX_32] = { I40E_GLPES_PFTCPRTXSEG(0), I40E_GLPES_PFTCPRXOPTERR(0), I40E_GLPES_PFTCPRXPROTOERR(0), - I40E_GLPES_PFRXVLANERR(0) -}; + I40E_GLPES_PFRXVLANERR(0), -static u32 i40iw_stat_offsets_64[IRDMA_HW_STAT_INDEX_MAX_64] = { I40E_GLPES_PFIP4RXOCTSLO(0), I40E_GLPES_PFIP4RXPKTSLO(0), I40E_GLPES_PFIP4RXFRAGSLO(0), @@ -158,6 +156,51 @@ static const struct irdma_irq_ops i40iw_irq_ops = { .irdma_en_irq = i40iw_ena_irq, }; +static const struct irdma_hw_stat_map i40iw_hw_stat_map[] = { + [IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 0, IRDMA_MAX_STATS_24 }, + [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 0, IRDMA_MAX_STATS_32 }, + [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 32, 0, IRDMA_MAX_STATS_32 }, + [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 40, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 48, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 56, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 64, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 72, 0, IRDMA_MAX_STATS_32 }, + [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 80, 0, IRDMA_MAX_STATS_32 }, + [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 88, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 96, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 104, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 112, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 120, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 128, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 136, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 144, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 152, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 160, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 168, 0, IRDMA_MAX_STATS_24 }, + [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 176, 0, IRDMA_MAX_STATS_24 }, + [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 184, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 192, 0, IRDMA_MAX_STATS_24 }, + [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 200, 0, IRDMA_MAX_STATS_24 }, + [IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 208, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 0, IRDMA_MAX_STATS_32 }, + [IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 224, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 232, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 240, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 248, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 256, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 264, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMAVBND] = { 272, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMAVINV] = { 280, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 288, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 296, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 304, 0, 
IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 312, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 320, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 328, 0, IRDMA_MAX_STATS_48 }, +}; + void i40iw_init_hw(struct irdma_sc_dev *dev) { int i; @@ -172,11 +215,8 @@ void i40iw_init_hw(struct irdma_sc_dev *dev) dev->hw_regs[i] = (u32 __iomem *)(i40iw_regs[i] + hw_addr); } - for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_32; ++i) - dev->hw_stats_regs_32[i] = i40iw_stat_offsets_32[i]; - - for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_64; ++i) - dev->hw_stats_regs_64[i] = i40iw_stat_offsets_64[i]; + for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_GEN_1; ++i) + dev->hw_stats_regs[i] = i40iw_stat_offsets[i]; dev->hw_attrs.first_hw_vf_fpm_id = I40IW_FIRST_VF_FPM_ID; dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID; @@ -195,6 +235,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev) dev->ceq_itr_mask_db = NULL; dev->aeq_itr_mask_db = NULL; dev->irq_ops = &i40iw_irq_ops; + dev->hw_stats_map = i40iw_hw_stat_map; /* Setup the hardware limits, hmc may limit further */ dev->hw_attrs.uk_attrs.max_hw_wq_frags = I40IW_MAX_WQ_FRAGMENT_COUNT; @@ -210,6 +251,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev) dev->hw_attrs.uk_attrs.max_hw_sq_chunk = I40IW_MAX_QUANTA_PER_WR; dev->hw_attrs.max_hw_pds = I40IW_MAX_PDS; dev->hw_attrs.max_stat_inst = I40IW_MAX_STATS_COUNT; + dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_1; dev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE; dev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE; dev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS; diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c index 5986fd906308cc..298d14905993b9 100644 --- a/drivers/infiniband/hw/irdma/icrdma_hw.c +++ b/drivers/infiniband/hw/irdma/icrdma_hw.c @@ -111,6 +111,55 @@ static const struct irdma_irq_ops icrdma_irq_ops = { .irdma_en_irq = icrdma_ena_irq, }; +static const struct irdma_hw_stat_map icrdma_hw_stat_map[] = { + [IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 32, IRDMA_MAX_STATS_24 }, + [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 32, IRDMA_MAX_STATS_32 }, + [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 24, 0, IRDMA_MAX_STATS_32 }, + [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 32, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 40, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 48, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 56, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 64, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 72, 32, IRDMA_MAX_STATS_32 }, + [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 72, 0, IRDMA_MAX_STATS_32 }, + [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 80, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 88, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 96, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 104, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 112, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 120, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 128, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 136, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 144, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 152, 0, IRDMA_MAX_STATS_48 }, + 
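+ /* Each entry is { byteoff, bitoff, bitmask }. Two 32-bit counters can share one 8-byte slot of the gather buffer: IP4RXDISCARD { 24, 32, ... } and IP4RXTRUNC { 24, 0, ... } above are the high and low halves of the u64 at byte offset 24, which irdma_stat_val() recovers as (stats_val[byteoff / 8] >> bitoff) & bitmask. */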
[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 160, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 168, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 176, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 184, 32, IRDMA_MAX_STATS_24 }, + [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 184, 0, IRDMA_MAX_STATS_24 }, + [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 192, 32, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 200, 32, IRDMA_MAX_STATS_24 }, + [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 200, 0, IRDMA_MAX_STATS_24 }, + [IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 208, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 32, IRDMA_MAX_STATS_32 }, + [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 224, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 232, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 240, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 248, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 256, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 264, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 272, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 280, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMAVBND] = { 288, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RDMAVINV] = { 296, 0, IRDMA_MAX_STATS_48 }, + [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = { 304, 0, IRDMA_MAX_STATS_56 }, + [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = { 312, 32, IRDMA_MAX_STATS_24 }, + [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = { 312, 0, IRDMA_MAX_STATS_32 }, + [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = { 320, 0, IRDMA_MAX_STATS_32 }, +}; + void icrdma_init_hw(struct irdma_sc_dev *dev) { int i; @@ -140,9 +189,11 @@ void icrdma_init_hw(struct irdma_sc_dev *dev) dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK]; dev->irq_ops = &icrdma_irq_ops; dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G; + dev->hw_stats_map = icrdma_hw_stat_map; dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE; dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE; dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT; + dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2; dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR; dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE | diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h index 4789e85d717b3e..173e2dc2fc3556 100644 --- a/drivers/infiniband/hw/irdma/irdma.h +++ b/drivers/infiniband/hw/irdma/irdma.h @@ -147,6 +147,7 @@ struct irdma_hw_attrs { u32 max_sleep_count; u32 max_cqp_compl_wait_time_ms; u16 max_stat_inst; + u16 max_stat_idx; }; void i40iw_init_hw(struct irdma_sc_dev *dev); diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h index 65e966ad345304..def6dd58dcd48a 100644 --- a/drivers/infiniband/hw/irdma/main.h +++ b/drivers/infiniband/hw/irdma/main.h @@ -115,6 +115,8 @@ extern struct auxiliary_driver i40iw_auxiliary_drv; #define IRDMA_REFLUSH BIT(2) #define IRDMA_FLUSH_WAIT BIT(3) +#define IRDMA_IRQ_NAME_STR_LEN (64) + enum init_completion_state { INVALID_STATE = 0, INITIAL_STATE, @@ -212,6 +214,7 @@ struct irdma_msix_vector { u32 cpu_affinity; u32 ceq_id; cpumask_t mask; + char name[IRDMA_IRQ_NAME_STR_LEN]; }; struct irdma_mc_table_info { diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c index cdc0b8a6ed4835..c0bef11436b940 100644 --- a/drivers/infiniband/hw/irdma/pble.c +++ b/drivers/infiniband/hw/irdma/pble.c @@ -423,15 +423,15 @@ 
static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine * @pble_rsrc: pble resources * @palloc: contains all information regarding pble (idx + pble addr) - * @level1_only: flag for a level 1 PBLE + * @lvl: Bitmask for requested pble level */ static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, - struct irdma_pble_alloc *palloc, bool level1_only) + struct irdma_pble_alloc *palloc, u8 lvl) { int status = 0; status = get_lvl1_pble(pble_rsrc, palloc); - if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE) + if (!status || lvl == PBLE_LEVEL_1 || palloc->total_cnt <= PBLE_PER_PAGE) return status; status = get_lvl2_pble(pble_rsrc, palloc); @@ -444,11 +444,11 @@ static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, * @pble_rsrc: pble resources * @palloc: contains all information regarding pble (idx + pble addr) * @pble_cnt: #of pbles requested - * @level1_only: true if only pble level 1 to acquire + * @lvl: requested pble level mask */ int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, struct irdma_pble_alloc *palloc, u32 pble_cnt, - bool level1_only) + u8 lvl) { int status = 0; int max_sds = 0; @@ -462,7 +462,7 @@ int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, /* check first to see if we can get pble's without acquiring * additional sd's */ - status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only); + status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl); if (!status) goto exit; @@ -472,9 +472,9 @@ if (status) break; - status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only); + status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl); /* if lvl is set, only go through it once */ - if (!status || level1_only) + if (!status || lvl) break; } diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h index 29d295463559d4..b31b7c5d66fe83 100644 --- a/drivers/infiniband/hw/irdma/pble.h +++ b/drivers/infiniband/hw/irdma/pble.h @@ -114,7 +114,7 @@ void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, struct irdma_pble_alloc *palloc); int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, struct irdma_pble_alloc *palloc, u32 pble_cnt, - bool level1_only); + u8 lvl); int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm, struct irdma_chunk *pchunk); int irdma_prm_get_pbles(struct irdma_pble_prm *pprm, diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h index 9b6e919ae2a95b..113096b60323c2 100644 --- a/drivers/infiniband/hw/irdma/protos.h +++ b/drivers/infiniband/hw/irdma/protos.h @@ -28,9 +28,7 @@ int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev, void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev, struct irdma_vsi_pestat *pestat); void irdma_hw_stats_read_all(struct irdma_vsi_pestat *stats, - struct irdma_dev_hw_stats *stats_values, - u64 *hw_stats_regs_32, u64 *hw_stats_regs_64, - u8 hw_rev); + const u64 *hw_stats_regs); int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd, struct irdma_ws_node_info *node_info); int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq, @@ -43,7 +41,9 @@ u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev); void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id); void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats, struct irdma_gather_stats *gather_stats, - struct irdma_gather_stats *last_gather_stats); + struct irdma_gather_stats *last_gather_stats, + const
struct irdma_hw_stat_map *map, u16 max_stat_idx); + /* vsi functions */ int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi, struct irdma_vsi_stats_info *info); diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h index 517d41a1c2894c..5ee68604e59fc1 100644 --- a/drivers/infiniband/hw/irdma/type.h +++ b/drivers/infiniband/hw/irdma/type.h @@ -101,7 +101,8 @@ enum irdma_qp_event_type { IRDMA_QP_EVENT_REQ_ERR, }; -enum irdma_hw_stats_index_32b { +enum irdma_hw_stats_index { + /* gen1 - 32-bit */ IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0, IRDMA_HW_STAT_INDEX_IP4RXTRUNC = 1, IRDMA_HW_STAT_INDEX_IP4TXNOROUTE = 2, @@ -111,50 +112,48 @@ enum irdma_hw_stats_index_32b { IRDMA_HW_STAT_INDEX_TCPRTXSEG = 6, IRDMA_HW_STAT_INDEX_TCPRXOPTERR = 7, IRDMA_HW_STAT_INDEX_TCPRXPROTOERR = 8, - IRDMA_HW_STAT_INDEX_MAX_32_GEN_1 = 9, /* Must be same value as next entry */ IRDMA_HW_STAT_INDEX_RXVLANERR = 9, - IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 10, - IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 11, - IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 12, - IRDMA_HW_STAT_INDEX_MAX_32, /* Must be last entry */ -}; - -enum irdma_hw_stats_index_64b { - IRDMA_HW_STAT_INDEX_IP4RXOCTS = 0, - IRDMA_HW_STAT_INDEX_IP4RXPKTS = 1, - IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 2, - IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 3, - IRDMA_HW_STAT_INDEX_IP4TXOCTS = 4, - IRDMA_HW_STAT_INDEX_IP4TXPKTS = 5, - IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 6, - IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 7, - IRDMA_HW_STAT_INDEX_IP6RXOCTS = 8, - IRDMA_HW_STAT_INDEX_IP6RXPKTS = 9, - IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 10, - IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 11, - IRDMA_HW_STAT_INDEX_IP6TXOCTS = 12, - IRDMA_HW_STAT_INDEX_IP6TXPKTS = 13, - IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 14, - IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 15, - IRDMA_HW_STAT_INDEX_TCPRXSEGS = 16, - IRDMA_HW_STAT_INDEX_TCPTXSEG = 17, - IRDMA_HW_STAT_INDEX_RDMARXRDS = 18, - IRDMA_HW_STAT_INDEX_RDMARXSNDS = 19, - IRDMA_HW_STAT_INDEX_RDMARXWRS = 20, - IRDMA_HW_STAT_INDEX_RDMATXRDS = 21, - IRDMA_HW_STAT_INDEX_RDMATXSNDS = 22, - IRDMA_HW_STAT_INDEX_RDMATXWRS = 23, - IRDMA_HW_STAT_INDEX_RDMAVBND = 24, - IRDMA_HW_STAT_INDEX_RDMAVINV = 25, - IRDMA_HW_STAT_INDEX_MAX_64_GEN_1 = 26, /* Must be same value as next entry */ - IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 26, - IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 27, - IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 28, - IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 29, - IRDMA_HW_STAT_INDEX_UDPRXPKTS = 30, - IRDMA_HW_STAT_INDEX_UDPTXPKTS = 31, - IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 32, - IRDMA_HW_STAT_INDEX_MAX_64, /* Must be last entry */ + /* gen1 - 64-bit */ + IRDMA_HW_STAT_INDEX_IP4RXOCTS = 10, + IRDMA_HW_STAT_INDEX_IP4RXPKTS = 11, + IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 12, + IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 13, + IRDMA_HW_STAT_INDEX_IP4TXOCTS = 14, + IRDMA_HW_STAT_INDEX_IP4TXPKTS = 15, + IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 16, + IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 17, + IRDMA_HW_STAT_INDEX_IP6RXOCTS = 18, + IRDMA_HW_STAT_INDEX_IP6RXPKTS = 19, + IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 20, + IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 21, + IRDMA_HW_STAT_INDEX_IP6TXOCTS = 22, + IRDMA_HW_STAT_INDEX_IP6TXPKTS = 23, + IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 24, + IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 25, + IRDMA_HW_STAT_INDEX_TCPRXSEGS = 26, + IRDMA_HW_STAT_INDEX_TCPTXSEG = 27, + IRDMA_HW_STAT_INDEX_RDMARXRDS = 28, + IRDMA_HW_STAT_INDEX_RDMARXSNDS = 29, + IRDMA_HW_STAT_INDEX_RDMARXWRS = 30, + IRDMA_HW_STAT_INDEX_RDMATXRDS = 31, + IRDMA_HW_STAT_INDEX_RDMATXSNDS = 32, + IRDMA_HW_STAT_INDEX_RDMATXWRS = 33, + IRDMA_HW_STAT_INDEX_RDMAVBND = 34, + 
IRDMA_HW_STAT_INDEX_RDMAVINV = 35, + IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 36, + IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 37, + IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 38, + IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 39, + IRDMA_HW_STAT_INDEX_UDPRXPKTS = 40, + IRDMA_HW_STAT_INDEX_UDPTXPKTS = 41, + IRDMA_HW_STAT_INDEX_MAX_GEN_1 = 42, /* Must be same value as next entry */ + /* gen2 - 64-bit */ + IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 42, + /* gen2 - 32-bit */ + IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 43, + IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44, + IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45, + IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46, }; enum irdma_feature_type { @@ -274,65 +273,21 @@ struct irdma_cq_shadow_area { }; struct irdma_dev_hw_stats_offsets { - u32 stats_offset_32[IRDMA_HW_STAT_INDEX_MAX_32]; - u32 stats_offset_64[IRDMA_HW_STAT_INDEX_MAX_64]; + u32 stats_offset[IRDMA_HW_STAT_INDEX_MAX_GEN_1]; }; struct irdma_dev_hw_stats { - u64 stats_val_32[IRDMA_HW_STAT_INDEX_MAX_32]; - u64 stats_val_64[IRDMA_HW_STAT_INDEX_MAX_64]; + u64 stats_val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)]; }; struct irdma_gather_stats { - u32 rsvd1; - u32 rxvlanerr; - u64 ip4rxocts; - u64 ip4rxpkts; - u32 ip4rxtrunc; - u32 ip4rxdiscard; - u64 ip4rxfrags; - u64 ip4rxmcocts; - u64 ip4rxmcpkts; - u64 ip6rxocts; - u64 ip6rxpkts; - u32 ip6rxtrunc; - u32 ip6rxdiscard; - u64 ip6rxfrags; - u64 ip6rxmcocts; - u64 ip6rxmcpkts; - u64 ip4txocts; - u64 ip4txpkts; - u64 ip4txfrag; - u64 ip4txmcocts; - u64 ip4txmcpkts; - u64 ip6txocts; - u64 ip6txpkts; - u64 ip6txfrags; - u64 ip6txmcocts; - u64 ip6txmcpkts; - u32 ip6txnoroute; - u32 ip4txnoroute; - u64 tcprxsegs; - u32 tcprxprotoerr; - u32 tcprxopterr; - u64 tcptxsegs; - u32 rsvd2; - u32 tcprtxseg; - u64 udprxpkts; - u64 udptxpkts; - u64 rdmarxwrs; - u64 rdmarxrds; - u64 rdmarxsnds; - u64 rdmatxwrs; - u64 rdmatxrds; - u64 rdmatxsnds; - u64 rdmavbn; - u64 rdmavinv; - u64 rxnpecnmrkpkts; - u32 rxrpcnphandled; - u32 rxrpcnpignored; - u32 txnpcnpsent; - u32 rsvd3[88]; + u64 val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)]; +}; + +struct irdma_hw_stat_map { + u16 byteoff; + u8 bitoff; + u64 bitmask; }; struct irdma_stats_gather_info { @@ -584,7 +539,7 @@ struct irdma_qos { bool valid; }; -#define IRDMA_INVALID_FCN_ID 0xff +#define IRDMA_INVALID_STATS_IDX 0xff struct irdma_sc_vsi { u16 vsi_idx; struct irdma_sc_dev *dev; @@ -598,11 +553,9 @@ struct irdma_sc_vsi { u32 exception_lan_q; u16 mtu; u16 vm_id; - u8 fcn_id; enum irdma_vm_vf_type vm_vf_type; - bool stats_fcn_id_alloc:1; + bool stats_inst_alloc:1; bool tc_change_pending:1; - struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY]; struct irdma_vsi_pestat *pestat; atomic_t qp_suspend_reqs; int (*register_qset)(struct irdma_sc_vsi *vsi, @@ -611,14 +564,17 @@ struct irdma_sc_vsi { struct irdma_ws_node *tc_node); u8 qos_rel_bw; u8 qos_prio_type; + u8 stats_idx; u8 dscp_map[IIDC_MAX_DSCP_MAPPING]; + struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY]; + u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1]; bool dscp_mode:1; }; struct irdma_sc_dev { struct list_head cqp_cmd_head; /* head of the CQP command list */ spinlock_t cqp_lock; /* protect CQP list access */ - bool fcn_id_array[IRDMA_MAX_STATS_COUNT]; + bool stats_idx_array[IRDMA_MAX_STATS_COUNT_GEN_1]; struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT]; u64 fpm_query_buf_pa; u64 fpm_commit_buf_pa; @@ -637,8 +593,8 @@ struct irdma_sc_dev { u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 
2 - 8160 */ u64 hw_masks[IRDMA_MAX_MASKS]; u64 hw_shifts[IRDMA_MAX_SHIFTS]; - u64 hw_stats_regs_32[IRDMA_HW_STAT_INDEX_MAX_32]; - u64 hw_stats_regs_64[IRDMA_HW_STAT_INDEX_MAX_64]; + const struct irdma_hw_stat_map *hw_stats_map; + u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1]; u64 feature_info[IRDMA_MAX_FEATURES]; u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS]; struct irdma_hw_attrs hw_attrs; @@ -763,7 +719,7 @@ struct irdma_vsi_init_info { struct irdma_vsi_stats_info { struct irdma_vsi_pestat *pestat; u8 fcn_id; - bool alloc_fcn_id; + bool alloc_stats_inst; }; struct irdma_device_init_info { diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c index 7887230c867b1e..71e1c5d347092f 100644 --- a/drivers/infiniband/hw/irdma/utils.c +++ b/drivers/infiniband/hw/irdma/utils.c @@ -1634,10 +1634,10 @@ static void irdma_hw_stats_timeout(struct timer_list *t) from_timer(pf_devstat, t, stats_timer); struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi; - if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) - irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat); - else + if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false); + else + irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat); mod_timer(&pf_devstat->stats_timer, jiffies + msecs_to_jiffies(STATS_TIMER_DELAY)); @@ -1686,164 +1686,28 @@ void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev, { struct irdma_gather_stats *gather_stats = pestat->gather_info.gather_stats_va; + const struct irdma_hw_stat_map *map = dev->hw_stats_map; + u16 max_stats_idx = dev->hw_attrs.max_stat_idx; u32 stats_inst_offset_32; u32 stats_inst_offset_64; + u64 new_val; + u16 i; stats_inst_offset_32 = (pestat->gather_info.use_stats_inst) ? 
- pestat->gather_info.stats_inst_index : - pestat->hw->hmc.hmc_fn_id; + pestat->gather_info.stats_inst_index : + pestat->hw->hmc.hmc_fn_id; stats_inst_offset_32 *= 4; stats_inst_offset_64 = stats_inst_offset_32 * 2; - gather_stats->rxvlanerr = - rd32(dev->hw, - dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_RXVLANERR] - + stats_inst_offset_32); - gather_stats->ip4rxdiscard = - rd32(dev->hw, - dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] - + stats_inst_offset_32); - gather_stats->ip4rxtrunc = - rd32(dev->hw, - dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] - + stats_inst_offset_32); - gather_stats->ip4txnoroute = - rd32(dev->hw, - dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] - + stats_inst_offset_32); - gather_stats->ip6rxdiscard = - rd32(dev->hw, - dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] - + stats_inst_offset_32); - gather_stats->ip6rxtrunc = - rd32(dev->hw, - dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] - + stats_inst_offset_32); - gather_stats->ip6txnoroute = - rd32(dev->hw, - dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] - + stats_inst_offset_32); - gather_stats->tcprtxseg = - rd32(dev->hw, - dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRTXSEG] - + stats_inst_offset_32); - gather_stats->tcprxopterr = - rd32(dev->hw, - dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] - + stats_inst_offset_32); - - gather_stats->ip4rxocts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXOCTS] - + stats_inst_offset_64); - gather_stats->ip4rxpkts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXPKTS] - + stats_inst_offset_64); - gather_stats->ip4txfrag = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] - + stats_inst_offset_64); - gather_stats->ip4rxmcpkts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] - + stats_inst_offset_64); - gather_stats->ip4txocts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXOCTS] - + stats_inst_offset_64); - gather_stats->ip4txpkts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXPKTS] - + stats_inst_offset_64); - gather_stats->ip4txfrag = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] - + stats_inst_offset_64); - gather_stats->ip4txmcpkts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] - + stats_inst_offset_64); - gather_stats->ip6rxocts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXOCTS] - + stats_inst_offset_64); - gather_stats->ip6rxpkts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXPKTS] - + stats_inst_offset_64); - gather_stats->ip6txfrags = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] - + stats_inst_offset_64); - gather_stats->ip6rxmcpkts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] - + stats_inst_offset_64); - gather_stats->ip6txocts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXOCTS] - + stats_inst_offset_64); - gather_stats->ip6txpkts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXPKTS] - + stats_inst_offset_64); - gather_stats->ip6txfrags = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] - + stats_inst_offset_64); - gather_stats->ip6txmcpkts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] - + stats_inst_offset_64); - gather_stats->tcprxsegs = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPRXSEGS] - + stats_inst_offset_64); - 
gather_stats->tcptxsegs = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPTXSEG] - + stats_inst_offset_64); - gather_stats->rdmarxrds = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXRDS] - + stats_inst_offset_64); - gather_stats->rdmarxsnds = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXSNDS] - + stats_inst_offset_64); - gather_stats->rdmarxwrs = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXWRS] - + stats_inst_offset_64); - gather_stats->rdmatxrds = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXRDS] - + stats_inst_offset_64); - gather_stats->rdmatxsnds = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXSNDS] - + stats_inst_offset_64); - gather_stats->rdmatxwrs = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXWRS] - + stats_inst_offset_64); - gather_stats->rdmavbn = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVBND] - + stats_inst_offset_64); - gather_stats->rdmavinv = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVINV] - + stats_inst_offset_64); - gather_stats->udprxpkts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPRXPKTS] - + stats_inst_offset_64); - gather_stats->udptxpkts = - rd64(dev->hw, - dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPTXPKTS] - + stats_inst_offset_64); + for (i = 0; i < max_stats_idx; i++) { + if (map[i].bitmask <= IRDMA_MAX_STATS_32) + new_val = rd32(dev->hw, + dev->hw_stats_regs[i] + stats_inst_offset_32); + else + new_val = rd64(dev->hw, + dev->hw_stats_regs[i] + stats_inst_offset_64); + gather_stats->val[map[i].byteoff / sizeof(u64)] = new_val; + } irdma_process_stats(pestat); } diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 1b2e3e800c9a66..ab5cdf78278521 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -1226,10 +1226,6 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, udp_info->ipv4 = false; irdma_copy_ip_ntohl(local_ip, daddr); - udp_info->arp_idx = irdma_arp_table(iwdev->rf, - &local_ip[0], - false, NULL, - IRDMA_ARP_RESOLVE); } else if (av->net_type == RDMA_NETWORK_IPV4) { __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr; __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr; @@ -2329,11 +2325,10 @@ static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc, * irdma_setup_pbles - copy user pg address to pble's * @rf: RDMA PCI function * @iwmr: mr pointer for this memory registration - * @use_pbles: flag if to use pble's - * @lvl_1_only: request only level 1 pble if true + * @lvl: requested pble levels */ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, - bool use_pbles, bool lvl_1_only) + u8 lvl) { struct irdma_pbl *iwpbl = &iwmr->iwpbl; struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; @@ -2342,9 +2337,9 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, int status; enum irdma_pble_level level = PBLE_LEVEL_1; - if (use_pbles) { + if (lvl) { status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt, - lvl_1_only); + lvl); if (status) return status; @@ -2359,7 +2354,7 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, irdma_copy_user_pgaddrs(iwmr, pbl, level); - if (use_pbles) + if (lvl) iwmr->pgaddrmem[0] = *pbl; return 0; @@ -2370,11 +2365,11 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, * @iwdev: irdma device * @req: 
information for q memory management * @iwpbl: pble struct - * @use_pbles: flag to use pble + * @lvl: pble level mask */ static int irdma_handle_q_mem(struct irdma_device *iwdev, struct irdma_mem_reg_req *req, - struct irdma_pbl *iwpbl, bool use_pbles) + struct irdma_pbl *iwpbl, u8 lvl) { struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; struct irdma_mr *iwmr = iwpbl->iwmr; @@ -2387,11 +2382,11 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev, bool ret = true; pg_size = iwmr->page_size; - err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, true); + err = irdma_setup_pbles(iwdev->rf, iwmr, lvl); if (err) return err; - if (use_pbles) + if (lvl) arr = palloc->level1.addr; switch (iwmr->type) { @@ -2400,7 +2395,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev, hmc_p = &qpmr->sq_pbl; qpmr->shadow = (dma_addr_t)arr[total]; - if (use_pbles) { + if (lvl) { ret = irdma_check_mem_contiguous(arr, req->sq_pages, pg_size); if (ret) @@ -2425,7 +2420,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev, if (!cqmr->split) cqmr->shadow = (dma_addr_t)arr[req->cq_pages]; - if (use_pbles) + if (lvl) ret = irdma_check_mem_contiguous(arr, req->cq_pages, pg_size); @@ -2439,7 +2434,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev, err = -EINVAL; } - if (use_pbles && ret) { + if (lvl && ret) { irdma_free_pble(iwdev->rf->pble_rsrc, palloc); iwpbl->pbl_allocated = false; } @@ -2749,17 +2744,17 @@ static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access) { struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); struct irdma_pbl *iwpbl = &iwmr->iwpbl; - bool use_pbles; u32 stag; + u8 lvl; int err; - use_pbles = iwmr->page_cnt != 1; + lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0; - err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false); + err = irdma_setup_pbles(iwdev->rf, iwmr, lvl); if (err) return err; - if (use_pbles) { + if (lvl) { err = irdma_check_mr_contiguous(&iwpbl->pble_alloc, iwmr->page_size); if (err) { @@ -2843,17 +2838,17 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req, struct irdma_pbl *iwpbl = &iwmr->iwpbl; struct irdma_ucontext *ucontext = NULL; unsigned long flags; - bool use_pbles; u32 total; int err; + u8 lvl; total = req.sq_pages + req.rq_pages + 1; if (total > iwmr->page_cnt) return -EINVAL; total = req.sq_pages + req.rq_pages; - use_pbles = total > 2; - err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles); + lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0; + err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl); if (err) return err; @@ -2876,9 +2871,9 @@ static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req, struct irdma_ucontext *ucontext = NULL; u8 shadow_pgcnt = 1; unsigned long flags; - bool use_pbles; u32 total; int err; + u8 lvl; if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE) shadow_pgcnt = 0; @@ -2886,8 +2881,8 @@ static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req, if (total > iwmr->page_cnt) return -EINVAL; - use_pbles = req.cq_pages > 1; - err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles); + lvl = req.cq_pages > 1 ? 
PBLE_LEVEL_1 : PBLE_LEVEL_0; + err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl); if (err) return err; @@ -3708,89 +3703,59 @@ static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num, return 0; } -static const struct rdma_stat_desc irdma_hw_stat_descs[] = { - /* 32bit names */ - [IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors", - [IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards", - [IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts", - [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes", - [IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards", - [IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts", - [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes", - [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs", - [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors", - [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors", - [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled", - [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored", - [IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent", - - /* 64bit names */ - [IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip4InOctets", - [IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip4InPkts", - [IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip4InReasmRqd", - [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip4InMcastOctets", - [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip4InMcastPkts", - [IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip4OutOctets", - [IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip4OutPkts", - [IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip4OutSegRqd", - [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip4OutMcastOctets", - [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip4OutMcastPkts", - [IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip6InOctets", - [IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip6InPkts", - [IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip6InReasmRqd", - [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip6InMcastOctets", - [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip6InMcastPkts", - [IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip6OutOctets", - [IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip6OutPkts", - [IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip6OutSegRqd", - [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip6OutMcastOctets", - [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "ip6OutMcastPkts", - [IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "tcpInSegs", - [IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32].name = - "tcpOutSegs", - [IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "iwInRdmaReads", - [IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "iwInRdmaSends", - [IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "iwInRdmaWrites", - [IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "iwOutRdmaReads", - [IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "iwOutRdmaSends", - 
[IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "iwOutRdmaWrites", - [IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32].name = - "iwRdmaBnd", - [IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32].name = - "iwRdmaInv", - [IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "RxUDP", - [IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name = - "TxUDP", - [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32] - .name = "RxECNMrkd", +static const struct rdma_stat_desc irdma_hw_stat_names[] = { + /* gen1 - 32-bit */ + [IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards", + [IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts", + [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes", + [IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards", + [IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts", + [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes", + [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs", + [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors", + [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors", + [IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors", + /* gen1 - 64-bit */ + [IRDMA_HW_STAT_INDEX_IP4RXOCTS].name = "ip4InOctets", + [IRDMA_HW_STAT_INDEX_IP4RXPKTS].name = "ip4InPkts", + [IRDMA_HW_STAT_INDEX_IP4RXFRAGS].name = "ip4InReasmRqd", + [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS].name = "ip4InMcastPkts", + [IRDMA_HW_STAT_INDEX_IP4TXOCTS].name = "ip4OutOctets", + [IRDMA_HW_STAT_INDEX_IP4TXPKTS].name = "ip4OutPkts", + [IRDMA_HW_STAT_INDEX_IP4TXFRAGS].name = "ip4OutSegRqd", + [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS].name = "ip4OutMcastPkts", + [IRDMA_HW_STAT_INDEX_IP6RXOCTS].name = "ip6InOctets", + [IRDMA_HW_STAT_INDEX_IP6RXPKTS].name = "ip6InPkts", + [IRDMA_HW_STAT_INDEX_IP6RXFRAGS].name = "ip6InReasmRqd", + [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS].name = "ip6InMcastPkts", + [IRDMA_HW_STAT_INDEX_IP6TXOCTS].name = "ip6OutOctets", + [IRDMA_HW_STAT_INDEX_IP6TXPKTS].name = "ip6OutPkts", + [IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name = "ip6OutSegRqd", + [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name = "ip6OutMcastPkts", + [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "tcpInSegs", + [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "tcpOutSegs", + [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "iwInRdmaReads", + [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "iwInRdmaSends", + [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "iwInRdmaWrites", + [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "iwOutRdmaReads", + [IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "iwOutRdmaSends", + [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "iwOutRdmaWrites", + [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "iwRdmaBnd", + [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "iwRdmaInv", + + /* gen2 - 32-bit */ + [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled", + [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored", + [IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent", + /* gen2 - 64-bit */ + [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS].name = "ip4InMcastOctets", + [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS].name = "ip4OutMcastOctets", + [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS].name = "ip6InMcastOctets", + [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS].name = "ip6OutMcastOctets", + [IRDMA_HW_STAT_INDEX_UDPRXPKTS].name = "RxUDP", + [IRDMA_HW_STAT_INDEX_UDPTXPKTS].name = "TxUDP", + [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name = "RxECNMrkd", + }; static void irdma_get_dev_fw_str(struct ib_device *dev, char *str) @@ -3810,14 +3775,13 @@ static void irdma_get_dev_fw_str(struct ib_device *dev, char *str) static struct rdma_hw_stats 
*irdma_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) { - int num_counters = IRDMA_HW_STAT_INDEX_MAX_32 + - IRDMA_HW_STAT_INDEX_MAX_64; - unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN; + struct irdma_device *iwdev = to_iwdev(ibdev); + struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; - BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_descs) != - (IRDMA_HW_STAT_INDEX_MAX_32 + IRDMA_HW_STAT_INDEX_MAX_64)); + int num_counters = dev->hw_attrs.max_stat_idx; + unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN; - return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters, + return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters, lifespan); } @@ -3840,7 +3804,7 @@ static int irdma_get_hw_stats(struct ib_device *ibdev, else irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat); - memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats)); + memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters); return stats->num_counters; } @@ -4054,7 +4018,7 @@ static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id; if (vlan_id < VLAN_N_VID) mc_qht_elem->mc_grp_ctx.vlan_valid = true; - mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->vsi.fcn_id; + mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id; mc_qht_elem->mc_grp_ctx.qs_handle = iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle; ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 884825b2e5f777..456656617c33f2 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -447,9 +447,13 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_ib_create_qp *ucmd) { + u32 cnt; + /* Sanity check SQ size before proceeding */ - if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes || - ucmd->log_sq_stride > + if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || + cnt > dev->dev->caps.max_wqes) + return -EINVAL; + if (ucmd->log_sq_stride > ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) || ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE) return -EINVAL; diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c index 3e1272695d993b..1c06920505d25f 100644 --- a/drivers/infiniband/hw/mlx5/counters.c +++ b/drivers/infiniband/hw/mlx5/counters.c @@ -5,6 +5,7 @@ #include "mlx5_ib.h" #include +#include #include "counters.h" #include "ib_rep.h" #include "qp.h" @@ -18,6 +19,10 @@ struct mlx5_ib_counter { #define INIT_Q_COUNTER(_name) \ { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)} +#define INIT_VPORT_Q_COUNTER(_name) \ + { .name = "vport_" #_name, .offset = \ + MLX5_BYTE_OFF(query_q_counter_out, _name)} + static const struct mlx5_ib_counter basic_q_cnts[] = { INIT_Q_COUNTER(rx_write_requests), INIT_Q_COUNTER(rx_read_requests), @@ -37,6 +42,25 @@ static const struct mlx5_ib_counter retrans_q_cnts[] = { INIT_Q_COUNTER(local_ack_timeout_err), }; +static const struct mlx5_ib_counter vport_basic_q_cnts[] = { + INIT_VPORT_Q_COUNTER(rx_write_requests), + INIT_VPORT_Q_COUNTER(rx_read_requests), + INIT_VPORT_Q_COUNTER(rx_atomic_requests), + INIT_VPORT_Q_COUNTER(out_of_buffer), +}; + +static const struct mlx5_ib_counter vport_out_of_seq_q_cnts[] = { + INIT_VPORT_Q_COUNTER(out_of_sequence), +}; + +static const struct mlx5_ib_counter vport_retrans_q_cnts[] = { + INIT_VPORT_Q_COUNTER(duplicate_request), + 
INIT_VPORT_Q_COUNTER(rnr_nak_retry_err), + INIT_VPORT_Q_COUNTER(packet_seq_err), + INIT_VPORT_Q_COUNTER(implied_nak_seq_err), + INIT_VPORT_Q_COUNTER(local_ack_timeout_err), +}; + #define INIT_CONG_COUNTER(_name) \ { .name = #_name, .offset = \ MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)} @@ -67,6 +91,25 @@ static const struct mlx5_ib_counter roce_accl_cnts[] = { INIT_Q_COUNTER(roce_slow_restart_trans), }; +static const struct mlx5_ib_counter vport_extended_err_cnts[] = { + INIT_VPORT_Q_COUNTER(resp_local_length_error), + INIT_VPORT_Q_COUNTER(resp_cqe_error), + INIT_VPORT_Q_COUNTER(req_cqe_error), + INIT_VPORT_Q_COUNTER(req_remote_invalid_request), + INIT_VPORT_Q_COUNTER(req_remote_access_errors), + INIT_VPORT_Q_COUNTER(resp_remote_access_errors), + INIT_VPORT_Q_COUNTER(resp_cqe_flush_error), + INIT_VPORT_Q_COUNTER(req_cqe_flush_error), +}; + +static const struct mlx5_ib_counter vport_roce_accl_cnts[] = { + INIT_VPORT_Q_COUNTER(roce_adp_retrans), + INIT_VPORT_Q_COUNTER(roce_adp_retrans_to), + INIT_VPORT_Q_COUNTER(roce_slow_restart), + INIT_VPORT_Q_COUNTER(roce_slow_restart_cnps), + INIT_VPORT_Q_COUNTER(roce_slow_restart_trans), +}; + #define INIT_EXT_PPCNT_COUNTER(_name) \ { .name = #_name, .offset = \ MLX5_BYTE_OFF(ppcnt_reg, \ @@ -153,12 +196,20 @@ static int mlx5_ib_create_counters(struct ib_counters *counters, return 0; } +static bool vport_qcounters_supported(struct mlx5_ib_dev *dev) +{ + return MLX5_CAP_GEN(dev->mdev, q_counter_other_vport) && + MLX5_CAP_GEN(dev->mdev, q_counter_aggregation); +} static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev, u32 port_num) { - return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts : - &dev->port[port_num].cnts; + if ((is_mdev_switchdev_mode(dev->mdev) && + !vport_qcounters_supported(dev)) || !port_num) + return &dev->port[0].cnts; + + return &dev->port[port_num - 1].cnts; } /** @@ -172,7 +223,7 @@ static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev, */ u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num) { - const struct mlx5_ib_counters *cnts = get_counters(dev, port_num); + const struct mlx5_ib_counters *cnts = get_counters(dev, port_num + 1); return cnts->set_id; } @@ -270,12 +321,44 @@ static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev, return ret; } +static int mlx5_ib_query_q_counters_vport(struct mlx5_ib_dev *dev, + u32 port_num, + const struct mlx5_ib_counters *cnts, + struct rdma_hw_stats *stats) + +{ + u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {}; + __be32 val; + int ret, i; + + if (!dev->port[port_num].rep || + dev->port[port_num].rep->vport == MLX5_VPORT_UPLINK) + return 0; + + MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); + MLX5_SET(query_q_counter_in, in, other_vport, 1); + MLX5_SET(query_q_counter_in, in, vport_number, + dev->port[port_num].rep->vport); + MLX5_SET(query_q_counter_in, in, aggregate, 1); + ret = mlx5_cmd_exec_inout(dev->mdev, query_q_counter, in, out); + if (ret) + return ret; + + for (i = 0; i < cnts->num_q_counters; i++) { + val = *(__be32 *)((void *)out + cnts->offsets[i]); + stats->value[i] = (u64)be32_to_cpu(val); + } + + return 0; +} + static int do_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, u32 port_num, int index) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1); + const struct mlx5_ib_counters *cnts = get_counters(dev, port_num); struct 
mlx5_core_dev *mdev; int ret, num_counters; @@ -286,11 +369,19 @@ static int do_get_hw_stats(struct ib_device *ibdev, cnts->num_cong_counters + cnts->num_ext_ppcnt_counters; - /* q_counters are per IB device, query the master mdev */ - ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id); + if (is_mdev_switchdev_mode(dev->mdev) && dev->is_rep && port_num != 0) + ret = mlx5_ib_query_q_counters_vport(dev, port_num - 1, cnts, + stats); + else + ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, + cnts->set_id); if (ret) return ret; + /* We don't expose device counters over Vports */ + if (is_mdev_switchdev_mode(dev->mdev) && port_num != 0) + goto done; + if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats); if (ret) @@ -335,7 +426,8 @@ static int do_get_op_stat(struct ib_device *ibdev, u32 type; int ret; - cnts = get_counters(dev, port_num - 1); + cnts = get_counters(dev, port_num); + opfcs = cnts->opfcs; type = *(u32 *)cnts->descs[index].priv; if (type >= MLX5_IB_OPCOUNTER_MAX) @@ -362,7 +454,7 @@ static int do_get_op_stats(struct ib_device *ibdev, const struct mlx5_ib_counters *cnts; int index, ret, num_hw_counters; - cnts = get_counters(dev, port_num - 1); + cnts = get_counters(dev, port_num); num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters + cnts->num_ext_ppcnt_counters; for (index = num_hw_counters; @@ -383,7 +475,7 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, struct mlx5_ib_dev *dev = to_mdev(ibdev); const struct mlx5_ib_counters *cnts; - cnts = get_counters(dev, port_num - 1); + cnts = get_counters(dev, port_num); num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters + cnts->num_ext_ppcnt_counters; num_counters = num_hw_counters + cnts->num_op_counters; @@ -410,8 +502,7 @@ static struct rdma_hw_stats * mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) { struct mlx5_ib_dev *dev = to_mdev(counter->device); - const struct mlx5_ib_counters *cnts = - get_counters(dev, counter->port - 1); + const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port); return do_alloc_stats(cnts); } @@ -419,8 +510,7 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) static int mlx5_ib_counter_update_stats(struct rdma_counter *counter) { struct mlx5_ib_dev *dev = to_mdev(counter->device); - const struct mlx5_ib_counters *cnts = - get_counters(dev, counter->port - 1); + const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port); return mlx5_ib_query_q_counters(dev->mdev, cnts, counter->stats, counter->id); @@ -479,44 +569,55 @@ static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp) } static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, - struct rdma_stat_desc *descs, size_t *offsets) + struct rdma_stat_desc *descs, size_t *offsets, + u32 port_num) { - int i; - int j = 0; + bool is_vport = is_mdev_switchdev_mode(dev->mdev) && + port_num != MLX5_VPORT_PF; + const struct mlx5_ib_counter *names; + int j = 0, i; + names = is_vport ? vport_basic_q_cnts : basic_q_cnts; for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) { - descs[j].name = basic_q_cnts[i].name; + descs[j].name = names[i].name; offsets[j] = basic_q_cnts[i].offset; } + names = is_vport ? 
vport_out_of_seq_q_cnts : out_of_seq_q_cnts; if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) { for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) { - descs[j].name = out_of_seq_q_cnts[i].name; + descs[j].name = names[i].name; offsets[j] = out_of_seq_q_cnts[i].offset; } } + names = is_vport ? vport_retrans_q_cnts : retrans_q_cnts; if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) { - descs[j].name = retrans_q_cnts[i].name; + descs[j].name = names[i].name; offsets[j] = retrans_q_cnts[i].offset; } } + names = is_vport ? vport_extended_err_cnts : extended_err_cnts; if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) { for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) { - descs[j].name = extended_err_cnts[i].name; + descs[j].name = names[i].name; offsets[j] = extended_err_cnts[i].offset; } } + names = is_vport ? vport_roce_accl_cnts : roce_accl_cnts; if (MLX5_CAP_GEN(dev->mdev, roce_accl)) { for (i = 0; i < ARRAY_SIZE(roce_accl_cnts); i++, j++) { - descs[j].name = roce_accl_cnts[i].name; + descs[j].name = names[i].name; offsets[j] = roce_accl_cnts[i].offset; } } + if (is_vport) + return; + if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) { descs[j].name = cong_cnts[i].name; @@ -558,9 +659,9 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev, - struct mlx5_ib_counters *cnts) + struct mlx5_ib_counters *cnts, u32 port_num) { - u32 num_counters, num_op_counters; + u32 num_counters, num_op_counters = 0; num_counters = ARRAY_SIZE(basic_q_cnts); @@ -578,6 +679,9 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev, cnts->num_q_counters = num_counters; + if (is_mdev_switchdev_mode(dev->mdev) && port_num != MLX5_VPORT_PF) + goto skip_non_qcounters; + if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { cnts->num_cong_counters = ARRAY_SIZE(cong_cnts); num_counters += ARRAY_SIZE(cong_cnts); @@ -597,6 +701,7 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev, ft_field_support_2_nic_transmit_rdma.bth_opcode)) num_op_counters += ARRAY_SIZE(rdmatx_cnp_op_cnts); +skip_non_qcounters: cnts->num_op_counters = num_op_counters; num_counters += num_op_counters; cnts->descs = kcalloc(num_counters, @@ -623,7 +728,8 @@ static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) int num_cnt_ports; int i, j; - num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports; + num_cnt_ports = (!is_mdev_switchdev_mode(dev->mdev) || + vport_qcounters_supported(dev)) ? dev->num_ports : 1; MLX5_SET(dealloc_q_counter_in, in, opcode, MLX5_CMD_OP_DEALLOC_Q_COUNTER); @@ -662,15 +768,16 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0; - num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports; + num_cnt_ports = (!is_mdev_switchdev_mode(dev->mdev) || + vport_qcounters_supported(dev)) ? dev->num_ports : 1; for (i = 0; i < num_cnt_ports; i++) { - err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts); + err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts, i); if (err) goto err_alloc; mlx5_ib_fill_counters(dev, dev->port[i].cnts.descs, - dev->port[i].cnts.offsets); + dev->port[i].cnts.offsets, i); MLX5_SET(alloc_q_counter_in, in, uid, is_shared ? 
MLX5_SHARED_RESOURCE_UID : 0); @@ -889,6 +996,10 @@ static const struct ib_device_ops hw_stats_ops = { mlx5_ib_modify_stat : NULL, }; +static const struct ib_device_ops hw_switchdev_vport_op = { + .alloc_hw_port_stats = mlx5_ib_alloc_hw_port_stats, +}; + static const struct ib_device_ops hw_switchdev_stats_ops = { .alloc_hw_device_stats = mlx5_ib_alloc_hw_device_stats, .get_hw_stats = mlx5_ib_get_hw_stats, @@ -914,9 +1025,11 @@ int mlx5_ib_counters_init(struct mlx5_ib_dev *dev) if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) return 0; - if (is_mdev_switchdev_mode(dev->mdev)) + if (is_mdev_switchdev_mode(dev->mdev)) { ib_set_device_ops(&dev->ib_dev, &hw_switchdev_stats_ops); - else + if (vport_qcounters_supported(dev)) + ib_set_device_ops(&dev->ib_dev, &hw_switchdev_vport_op); + } else ib_set_device_ops(&dev->ib_dev, &hw_stats_ops); return mlx5_ib_alloc_counters(dev); } diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 2211a0be16f36c..db5fb196c728b0 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -666,7 +666,21 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs, obj_id; case MLX5_IB_OBJECT_DEVX_OBJ: - return ((struct devx_obj *)uobj->object)->obj_id == obj_id; + { + u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode); + struct devx_obj *devx_uobj = uobj->object; + + if (opcode == MLX5_CMD_OP_QUERY_FLOW_COUNTER && + devx_uobj->flow_counter_bulk_size) { + u64 end; + + end = devx_uobj->obj_id + + devx_uobj->flow_counter_bulk_size; + return devx_uobj->obj_id <= obj_id && end > obj_id; + } + + return devx_uobj->obj_id == obj_id; + } default: return false; @@ -1517,10 +1531,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( goto obj_free; if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) { - u8 bulk = MLX5_GET(alloc_flow_counter_in, - cmd_in, - flow_counter_bulk); - obj->flow_counter_bulk_size = 128UL * bulk; + u32 bulk = MLX5_GET(alloc_flow_counter_in, + cmd_in, + flow_counter_bulk_log_size); + + if (bulk) + bulk = 1 << bulk; + else + bulk = 128UL * MLX5_GET(alloc_flow_counter_in, + cmd_in, + flow_counter_bulk); + obj->flow_counter_bulk_size = bulk; } uobj->object = obj; @@ -1993,7 +2014,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)( int redirect_fd; bool use_eventfd = false; int num_events; - int num_alloc_xa_entries = 0; u16 obj_type = 0; u64 cookie = 0; u32 obj_id = 0; @@ -2075,7 +2095,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)( if (err) goto err; - num_alloc_xa_entries++; event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL); if (!event_sub) { err = -ENOMEM; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 67356f51526168..2017ede100a62d 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -67,11 +67,14 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr, MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE)); MLX5_SET(mkc, mkc, lr, 1); - if ((acc & IB_ACCESS_RELAXED_ORDERING) && - pcie_relaxed_ordering_enabled(dev->mdev->pdev)) { + if (acc & IB_ACCESS_RELAXED_ORDERING) { if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) MLX5_SET(mkc, mkc, relaxed_ordering_write, 1); - if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)) + + if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) || + (MLX5_CAP_GEN(dev->mdev, + relaxed_ordering_read_pci_enabled) && + pcie_relaxed_ordering_enabled(dev->mdev->pdev))) MLX5_SET(mkc, mkc, relaxed_ordering_read, 1); } @@ 
-791,7 +794,8 @@ static int get_unchangeable_access_flags(struct mlx5_ib_dev *dev, ret |= IB_ACCESS_RELAXED_ORDERING; if ((access_flags & IB_ACCESS_RELAXED_ORDERING) && - MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) && + (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) || + MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) && !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) ret |= IB_ACCESS_RELAXED_ORDERING; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 7cc3b973dec7b3..70ca8ffa9256b1 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -60,6 +60,10 @@ enum raw_qp_set_mask_map { MLX5_RAW_QP_RATE_LIMIT = 1UL << 1, }; +enum { + MLX5_QP_RM_GO_BACK_N = 0x1, +}; + struct mlx5_modify_raw_qp_param { u16 operation; @@ -2519,6 +2523,10 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1); + if (qp->flags & IB_QP_CREATE_INTEGRITY_EN && + MLX5_CAP_GEN(mdev, go_back_n)) + MLX5_SET(qpc, qpc, retry_mode, MLX5_QP_RM_GO_BACK_N); + err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out); kvfree(in); if (err) @@ -2846,9 +2854,9 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag, case MLX5_QP_FLAG_SCATTER_CQE: case MLX5_QP_FLAG_ALLOW_SCATTER_CQE: /* - * We don't return error if these flags were provided, - * and mlx5 doesn't have right capability. - */ + * We don't return error if these flags were provided, + * and mlx5 doesn't have right capability. + */ *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE | MLX5_QP_FLAG_ALLOW_SCATTER_CQE); return; @@ -4485,7 +4493,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, return -EINVAL; if (attr->port_num == 0 || - attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) { + attr->port_num > dev->num_ports) { mlx5_ib_dbg(dev, "invalid port number %d. 
number of ports is %d\n", attr->port_num, dev->num_ports); return -EINVAL; @@ -5592,8 +5600,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) { if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && MLX5_CAP_ETH(dev->mdev, vlan_cap))) { - mlx5_ib_dbg(dev, "VLAN offloads are not " - "supported\n"); + mlx5_ib_dbg(dev, "VLAN offloads are not supported\n"); err = -EOPNOTSUPP; goto out; } diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c index 55f4e048d94743..234bf30db7319a 100644 --- a/drivers/infiniband/hw/mlx5/umr.c +++ b/drivers/infiniband/hw/mlx5/umr.c @@ -380,6 +380,10 @@ static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev, struct mlx5_mkey_seg *seg, unsigned int access_flags) { + bool ro_read = (access_flags & IB_ACCESS_RELAXED_ORDERING) && + (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) || + pcie_relaxed_ordering_enabled(dev->mdev->pdev)); + MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC)); MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE)); MLX5_SET(mkc, seg, rr, !!(access_flags & IB_ACCESS_REMOTE_READ)); @@ -387,8 +391,7 @@ static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev, MLX5_SET(mkc, seg, lr, 1); MLX5_SET(mkc, seg, relaxed_ordering_write, !!(access_flags & IB_ACCESS_RELAXED_ORDERING)); - MLX5_SET(mkc, seg, relaxed_ordering_read, - !!(access_flags & IB_ACCESS_RELAXED_ORDERING)); + MLX5_SET(mkc, seg, relaxed_ordering_read, ro_read); } int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd, diff --git a/drivers/infiniband/hw/mlx5/umr.h b/drivers/infiniband/hw/mlx5/umr.h index c9d0021381a280..3799bb758e490a 100644 --- a/drivers/infiniband/hw/mlx5/umr.h +++ b/drivers/infiniband/hw/mlx5/umr.h @@ -62,7 +62,8 @@ static inline bool mlx5r_umr_can_reconfig(struct mlx5_ib_dev *dev, return false; if ((diffs & IB_ACCESS_RELAXED_ORDERING) && - MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) && + (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) || + MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) && !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) return false; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index dd4021b1196300..58f994341e9acd 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1589,7 +1589,6 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq) { unsigned long cq_flags; unsigned long flags; - int discard_cnt = 0; u32 cur_getp, stop_getp; struct ocrdma_cqe *cqe; u32 qpn = 0, wqe_idx = 0; @@ -1641,7 +1640,6 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq) /* mark cqe discarded so that it is not picked up later * in the poll_cq(). 
*/ - discard_cnt += 1; cqe->cmn.qpn = 0; skip_cqe: cur_getp = (cur_getp + 1) % cq->max_hw_cqe; diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index f22352f2b369f6..ef85bc8d9384cb 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -484,7 +484,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt, const struct qib_tid_info *ti) { int ret = 0; - u32 tid, ctxttid, cnt, limit, tidcnt; + u32 tid, ctxttid, limit, tidcnt; struct qib_devdata *dd = rcd->dd; u64 __iomem *tidbase; unsigned long tidmap[8]; @@ -520,7 +520,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt, /* just in case size changes in future */ limit = tidcnt; tid = find_first_bit(tidmap, limit); - for (cnt = 0; tid < limit; tid++) { + for (; tid < limit; tid++) { /* * small optimization; if we detect a run of 3 or so without * any set, use find_first_bit again. That's mainly to @@ -530,7 +530,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt, */ if (!test_bit(tid, tidmap)) continue; - cnt++; + if (dd->pageshadow[ctxttid + tid]) { struct page *p; dma_addr_t phys; @@ -1768,7 +1768,7 @@ static void unlock_expected_tids(struct qib_ctxtdata *rcd) { struct qib_devdata *dd = rcd->dd; int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt; - int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt; + int i, maxtid = ctxt_tidbase + dd->rcvtidcnt; for (i = ctxt_tidbase; i < maxtid; i++) { struct page *p = dd->pageshadow[i]; @@ -1783,7 +1783,6 @@ static void unlock_expected_tids(struct qib_ctxtdata *rcd) dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE, DMA_FROM_DEVICE); qib_release_user_pages(&p, 1); - cnt++; } } diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 692b64efad97b1..47bf64ace05c8d 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include "qib.h" @@ -105,13 +104,6 @@ int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent) } pci_set_master(pdev); - ret = pci_enable_pcie_error_reporting(pdev); - if (ret) { - qib_early_err(&pdev->dev, - "Unable to enable pcie error reporting: %d\n", - ret); - ret = 0; - } goto done; bail: diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c index 9fe03d6ffac1a7..336eb15a721feb 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c @@ -320,7 +320,6 @@ static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd, unpin_user_page(page); } else { /* coalesce case */ - kunmap(page); __free_page(page); } ret = -ENOMEM; @@ -572,7 +571,7 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd, goto done; } - mpage = kmap(page); + mpage = page_address(page); mpage_save = mpage; for (i = 0; i < niov; i++) { int cfur; @@ -581,7 +580,7 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd, iov[i].iov_base, iov[i].iov_len); if (cfur) { ret = -EFAULT; - goto free_unmap; + goto page_free; } mpage += iov[i].iov_len; @@ -592,8 +591,7 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd, page, 0, 0, len, mpage_save); goto done; -free_unmap: - kunmap(page); +page_free: __free_page(page); done: return ret; @@ -627,9 +625,6 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev, pkt->addr[i].dma_length, DMA_TO_DEVICE); - if (pkt->addr[i].kvaddr) - 
kunmap(pkt->addr[i].page); - if (pkt->addr[i].put_page) unpin_user_page(pkt->addr[i].page); else diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 46653ad56f5a12..13b654ddd3cc8d 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -602,7 +602,6 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev, usnic_vnic_free(vf->vnic); out_release_regions: pci_set_drvdata(pdev, NULL); - pci_clear_master(pdev); pci_release_regions(pdev); out_disable_device: pci_disable_device(pdev); @@ -623,7 +622,6 @@ static void usnic_ib_pci_remove(struct pci_dev *pdev) kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf); usnic_vnic_free(vf->vnic); pci_set_drvdata(pdev, NULL); - pci_clear_master(pdev); pci_release_regions(pdev); pci_disable_device(pdev); kfree(vf); diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 9b4c0389d2c006..dc83d0ac6a38a1 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -464,8 +464,6 @@ void rvt_qp_exit(struct rvt_dev_info *rdi) if (qps_inuse) rvt_pr_err(rdi, "QP memory leak! %u still in use\n", qps_inuse); - if (!rdi->qp_dev) - return; kfree(rdi->qp_dev->qp_table); free_qpn_table(&rdi->qp_dev->qpn_table); @@ -2040,7 +2038,7 @@ static int rvt_post_one_wr(struct rvt_qp *qp, wqe = rvt_get_swqe_ptr(qp, qp->s_head); /* cplen has length from above */ - memcpy(&wqe->wr, wr, cplen); + memcpy(&wqe->ud_wr, wr, cplen); wqe->length = 0; j = 0; diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 136c2efe34660a..7a7e713de52db3 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -160,6 +160,8 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu) port->attr.active_mtu = mtu; port->mtu_cap = ib_mtu_enum_to_int(mtu); + + rxe_info_dev(rxe, "Set mtu to %d", port->mtu_cap); } /* called by ifc layer to create new rxe device. @@ -175,26 +177,26 @@ int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name) static int rxe_newlink(const char *ibdev_name, struct net_device *ndev) { - struct rxe_dev *exists; + struct rxe_dev *rxe; int err = 0; if (is_vlan_dev(ndev)) { - pr_err("rxe creation allowed on top of a real device only\n"); + rxe_err("rxe creation allowed on top of a real device only"); err = -EPERM; goto err; } - exists = rxe_get_dev_from_net(ndev); - if (exists) { - ib_device_put(&exists->ib_dev); - rxe_dbg(exists, "already configured on %s\n", ndev->name); + rxe = rxe_get_dev_from_net(ndev); + if (rxe) { + ib_device_put(&rxe->ib_dev); + rxe_err_dev(rxe, "already configured on %s", ndev->name); err = -EEXIST; goto err; } err = rxe_net_add(ibdev_name, ndev); if (err) { - rxe_dbg(exists, "failed to add %s\n", ndev->name); + rxe_err("failed to add %s\n", ndev->name); goto err; } err: diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index 2415f3704f576b..d33dd6cf83d377 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -38,7 +38,8 @@ #define RXE_ROCE_V2_SPORT (0xc000) -#define rxe_dbg(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \ +#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt "\n", __func__, ##__VA_ARGS__) +#define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \ "%s: " fmt, __func__, ##__VA_ARGS__) #define rxe_dbg_uc(uc, fmt, ...) 
ibdev_dbg((uc)->ibuc.device, \ "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__) @@ -57,6 +58,48 @@ #define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \ "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt "\n", __func__, \ + ##__VA_ARGS__) +#define rxe_err_dev(rxe, fmt, ...) ibdev_err_ratelimited(&(rxe)->ib_dev, \ + "%s: " fmt, __func__, ##__VA_ARGS__) +#define rxe_err_uc(uc, fmt, ...) ibdev_err_ratelimited((uc)->ibuc.device, \ + "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_err_pd(pd, fmt, ...) ibdev_err_ratelimited((pd)->ibpd.device, \ + "pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_err_ah(ah, fmt, ...) ibdev_err_ratelimited((ah)->ibah.device, \ + "ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_err_srq(srq, fmt, ...) ibdev_err_ratelimited((srq)->ibsrq.device, \ + "srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_err_qp(qp, fmt, ...) ibdev_err_ratelimited((qp)->ibqp.device, \ + "qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_err_cq(cq, fmt, ...) ibdev_err_ratelimited((cq)->ibcq.device, \ + "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_err_mr(mr, fmt, ...) ibdev_err_ratelimited((mr)->ibmr.device, \ + "mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \ + "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__) + +#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt "\n", __func__, \ + ##__VA_ARGS__) +#define rxe_info_dev(rxe, fmt, ...) ibdev_info_ratelimited(&(rxe)->ib_dev, \ + "%s: " fmt, __func__, ##__VA_ARGS__) +#define rxe_info_uc(uc, fmt, ...) ibdev_info_ratelimited((uc)->ibuc.device, \ + "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_info_pd(pd, fmt, ...) ibdev_info_ratelimited((pd)->ibpd.device, \ + "pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_info_ah(ah, fmt, ...) ibdev_info_ratelimited((ah)->ibah.device, \ + "ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_info_srq(srq, fmt, ...) ibdev_info_ratelimited((srq)->ibsrq.device, \ + "srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_info_qp(qp, fmt, ...) ibdev_info_ratelimited((qp)->ibqp.device, \ + "qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_info_cq(cq, fmt, ...) ibdev_info_ratelimited((cq)->ibcq.device, \ + "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_info_mr(mr, fmt, ...) ibdev_info_ratelimited((mr)->ibmr.device, \ + "mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__) +#define rxe_info_mw(mw, fmt, ...) 
ibdev_info_ratelimited((mw)->ibmw.device, \ + "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__) + /* responder states */ enum resp_states { RESPST_NONE, @@ -90,7 +133,6 @@ enum resp_states { RESPST_ERR_LENGTH, RESPST_ERR_CQ_OVERFLOW, RESPST_ERROR, - RESPST_RESET, RESPST_DONE, RESPST_EXIT, }; diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index 20737fec392bf7..db18ace74d2b53 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -118,10 +118,12 @@ void retransmit_timer(struct timer_list *t) rxe_dbg_qp(qp, "retransmit timer fired\n"); + spin_lock_bh(&qp->state_lock); if (qp->valid) { qp->comp.timeout = 1; rxe_sched_task(&qp->comp.task); } + spin_unlock_bh(&qp->state_lock); } void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) @@ -322,7 +324,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, qp->comp.psn = pkt->psn; if (qp->req.wait_psn) { qp->req.wait_psn = 0; - rxe_run_task(&qp->req.task); + rxe_sched_task(&qp->req.task); } } return COMPST_ERROR_RETRY; @@ -428,6 +430,10 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe, uwc->wc_flags = IB_WC_WITH_IMM; uwc->byte_len = wqe->dma.length; } + } else { + if (wqe->status != IB_WC_WR_FLUSH_ERR) + rxe_err_qp(qp, "non-flush error status = %d", + wqe->status); } } @@ -469,30 +475,16 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) */ if (qp->req.wait_fence) { qp->req.wait_fence = 0; - rxe_run_task(&qp->req.task); + rxe_sched_task(&qp->req.task); } } -static inline enum comp_state complete_ack(struct rxe_qp *qp, - struct rxe_pkt_info *pkt, - struct rxe_send_wqe *wqe) +static void comp_check_sq_drain_done(struct rxe_qp *qp) { - if (wqe->has_rd_atomic) { - wqe->has_rd_atomic = 0; - atomic_inc(&qp->req.rd_atomic); - if (qp->req.need_rd_atomic) { - qp->comp.timeout_retry = 0; - qp->req.need_rd_atomic = 0; - rxe_run_task(&qp->req.task); - } - } - - if (unlikely(qp->req.state == QP_STATE_DRAIN)) { - /* state_lock used by requester & completer */ - spin_lock_bh(&qp->state_lock); - if ((qp->req.state == QP_STATE_DRAIN) && - (qp->comp.psn == qp->req.psn)) { - qp->req.state = QP_STATE_DRAINED; + spin_lock_bh(&qp->state_lock); + if (unlikely(qp_state(qp) == IB_QPS_SQD)) { + if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) { + qp->attr.sq_draining = 0; spin_unlock_bh(&qp->state_lock); if (qp->ibqp.event_handler) { @@ -504,10 +496,27 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } - } else { - spin_unlock_bh(&qp->state_lock); + return; } } + spin_unlock_bh(&qp->state_lock); +} + +static inline enum comp_state complete_ack(struct rxe_qp *qp, + struct rxe_pkt_info *pkt, + struct rxe_send_wqe *wqe) +{ + if (wqe->has_rd_atomic) { + wqe->has_rd_atomic = 0; + atomic_inc(&qp->req.rd_atomic); + if (qp->req.need_rd_atomic) { + qp->comp.timeout_retry = 0; + qp->req.need_rd_atomic = 0; + rxe_sched_task(&qp->req.task); + } + } + + comp_check_sq_drain_done(qp); do_complete(qp, wqe); @@ -538,25 +547,60 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp, return COMPST_GET_WQE; } -static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify) +/* drain incoming response packet queue */ +static void drain_resp_pkts(struct rxe_qp *qp) { struct sk_buff *skb; - struct rxe_send_wqe *wqe; - struct rxe_queue *q = qp->sq.queue; while ((skb = skb_dequeue(&qp->resp_pkts))) { rxe_put(qp); kfree_skb(skb); ib_device_put(qp->ibqp.device); } +} 
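The hunks in this area convert rxe's timer handlers and drain paths to read the QP state only under qp->state_lock. A minimal sketch of that idiom, assuming the rxe types used in the surrounding code; qp_timer_fired() is a hypothetical handler, not part of the patch:

/* Sketch of the state_lock idiom this series applies to the rxe timer
 * and task entry points: the fields that decide whether work may be
 * scheduled (qp->valid, qp_state(qp)) are read only while the lock is
 * held, closing the race with rxe_qp_do_cleanup().
 */
static void qp_timer_fired(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, retrans_timer);

	spin_lock_bh(&qp->state_lock);
	if (qp->valid && qp_state(qp) >= IB_QPS_RTS)
		rxe_sched_task(&qp->comp.task);
	spin_unlock_bh(&qp->state_lock);
}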
+ +/* complete send wqe with flush error */ +static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe) +{ + struct rxe_cqe cqe = {}; + struct ib_wc *wc = &cqe.ibwc; + struct ib_uverbs_wc *uwc = &cqe.uibwc; + int err; + + if (qp->is_user) { + uwc->wr_id = wqe->wr.wr_id; + uwc->status = IB_WC_WR_FLUSH_ERR; + uwc->qp_num = qp->ibqp.qp_num; + } else { + wc->wr_id = wqe->wr.wr_id; + wc->status = IB_WC_WR_FLUSH_ERR; + wc->qp = &qp->ibqp; + } + + err = rxe_cq_post(qp->scq, &cqe, 0); + if (err) + rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err); + + return err; +} + +/* drain and optionally complete the send queue + * if unable to complete a wqe, i.e. cq is full, stop + * completing and flush the remaining wqes + */ +static void flush_send_queue(struct rxe_qp *qp, bool notify) +{ + struct rxe_send_wqe *wqe; + struct rxe_queue *q = qp->sq.queue; + int err; while ((wqe = queue_head(q, q->type))) { if (notify) { - wqe->status = IB_WC_WR_FLUSH_ERR; - do_complete(qp, wqe); - } else { - queue_advance_consumer(q, q->type); + err = flush_send_wqe(qp, wqe); + if (err) + notify = 0; } + queue_advance_consumer(q, q->type); } } @@ -571,9 +615,28 @@ static void free_pkt(struct rxe_pkt_info *pkt) ib_device_put(dev); } -int rxe_completer(void *arg) +/* reset the retry timer if + * - QP is type RC + * - there is a packet sent by the requester that + * might be acked (we still might get spurious + * timeouts but try to keep them as few as possible) + * - the timeout parameter is set + * - the QP is alive + */ +static void reset_retry_timer(struct rxe_qp *qp) +{ + if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) { + spin_lock_bh(&qp->state_lock); + if (qp_state(qp) >= IB_QPS_RTS && + psn_compare(qp->req.psn, qp->comp.psn) > 0) + mod_timer(&qp->retrans_timer, + jiffies + qp->qp_timeout_jiffies); + spin_unlock_bh(&qp->state_lock); + } +} + +int rxe_completer(struct rxe_qp *qp) { - struct rxe_qp *qp = (struct rxe_qp *)arg; struct rxe_dev *rxe = to_rdev(qp->ibqp.device); struct rxe_send_wqe *wqe = NULL; struct sk_buff *skb = NULL; @@ -581,15 +644,17 @@ int rxe_completer(void *arg) enum comp_state state; int ret; - if (!rxe_get(qp)) - return -EAGAIN; + spin_lock_bh(&qp->state_lock); + if (!qp->valid || qp_state(qp) == IB_QPS_ERR || + qp_state(qp) == IB_QPS_RESET) { + bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR); - if (!qp->valid || qp->comp.state == QP_STATE_ERROR || - qp->comp.state == QP_STATE_RESET) { - rxe_drain_resp_pkts(qp, qp->valid && - qp->comp.state == QP_STATE_ERROR); + drain_resp_pkts(qp); + flush_send_queue(qp, notify); + spin_unlock_bh(&qp->state_lock); goto exit; } + spin_unlock_bh(&qp->state_lock); if (qp->comp.timeout) { qp->comp.timeout_retry = 1; @@ -677,20 +742,7 @@ int rxe_completer(void *arg) break; } - /* re reset the timeout counter if - * (1) QP is type RC - * (2) the QP is alive - * (3) there is a packet sent by the requester that - * might be acked (we still might get spurious - * timeouts but try to keep them as few as possible) - * (4) the timeout parameter is set - */ - if ((qp_type(qp) == IB_QPT_RC) && - (qp->req.state == QP_STATE_READY) && - (psn_compare(qp->req.psn, qp->comp.psn) > 0) && - qp->qp_timeout_jiffies) - mod_timer(&qp->retrans_timer, - jiffies + qp->qp_timeout_jiffies); + reset_retry_timer(qp); goto exit; case COMPST_ERROR_RETRY: @@ -730,7 +782,7 @@ int rxe_completer(void *arg) RXE_CNT_COMP_RETRY); qp->req.need_retry = 1; qp->comp.started_retry = 1; - rxe_run_task(&qp->req.task); + rxe_sched_task(&qp->req.task); } goto done; @@ -752,6 +804,7 
@@ int rxe_completer(void *arg) */ qp->req.wait_for_rnr_timer = 1; rxe_dbg_qp(qp, "set rnr nak timer\n"); + // TODO who protects from destroy_qp?? mod_timer(&qp->rnr_nak_timer, jiffies + rnrnak_jiffies(aeth_syn(pkt) & ~AETH_TYPE_MASK)); @@ -784,7 +837,5 @@ int rxe_completer(void *arg) out: if (pkt) free_pkt(pkt); - rxe_put(qp); - return ret; } diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index 1df186534639aa..20ff0c0c46052e 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -14,12 +14,12 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, int count; if (cqe <= 0) { - rxe_dbg(rxe, "cqe(%d) <= 0\n", cqe); + rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe); goto err1; } if (cqe > rxe->attr.max_cqe) { - rxe_dbg(rxe, "cqe(%d) > max_cqe(%d)\n", + rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n", cqe, rxe->attr.max_cqe); goto err1; } @@ -39,21 +39,6 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, return -EINVAL; } -static void rxe_send_complete(struct tasklet_struct *t) -{ - struct rxe_cq *cq = from_tasklet(cq, t, comp_task); - unsigned long flags; - - spin_lock_irqsave(&cq->cq_lock, flags); - if (cq->is_dying) { - spin_unlock_irqrestore(&cq->cq_lock, flags); - return; - } - spin_unlock_irqrestore(&cq->cq_lock, flags); - - cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); -} - int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, int comp_vector, struct ib_udata *udata, struct rxe_create_cq_resp __user *uresp) @@ -65,7 +50,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, cq->queue = rxe_queue_init(rxe, &cqe, sizeof(struct rxe_cqe), type); if (!cq->queue) { - rxe_dbg(rxe, "unable to create cq\n"); + rxe_dbg_dev(rxe, "unable to create cq\n"); return -ENOMEM; } @@ -79,10 +64,6 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, cq->is_user = uresp; - cq->is_dying = false; - - tasklet_setup(&cq->comp_task, rxe_send_complete); - spin_lock_init(&cq->cq_lock); cq->ibcq.cqe = cqe; return 0; @@ -103,6 +84,7 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, return err; } +/* caller holds reference to cq */ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) { struct ib_event ev; @@ -114,6 +96,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT); if (unlikely(full)) { + rxe_err_cq(cq, "queue full"); spin_unlock_irqrestore(&cq->cq_lock, flags); if (cq->ibcq.event_handler) { ev.device = cq->ibcq.device; @@ -135,21 +118,13 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) if ((cq->notify == IB_CQ_NEXT_COMP) || (cq->notify == IB_CQ_SOLICITED && solicited)) { cq->notify = 0; - tasklet_schedule(&cq->comp_task); + + cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); } return 0; } -void rxe_cq_disable(struct rxe_cq *cq) -{ - unsigned long flags; - - spin_lock_irqsave(&cq->cq_lock, flags); - cq->is_dying = true; - spin_unlock_irqrestore(&cq->cq_lock, flags); -} - void rxe_cq_cleanup(struct rxe_pool_elem *elem) { struct rxe_cq *cq = container_of(elem, typeof(*cq), elem); diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c index 71bc2c1895888f..fdf5f08cd8f173 100644 --- a/drivers/infiniband/sw/rxe/rxe_icrc.c +++ b/drivers/infiniband/sw/rxe/rxe_icrc.c @@ -21,7 +21,7 @@ int rxe_icrc_init(struct rxe_dev *rxe) tfm = crypto_alloc_shash("crc32", 0, 0); if (IS_ERR(tfm)) { - rxe_dbg(rxe, "failed to init crc32 
algorithm err: %ld\n", + rxe_dbg_dev(rxe, "failed to init crc32 algorithm err: %ld\n", PTR_ERR(tfm)); return PTR_ERR(tfm); } @@ -51,7 +51,7 @@ static __be32 rxe_crc32(struct rxe_dev *rxe, __be32 crc, void *next, size_t len) *(__be32 *)shash_desc_ctx(shash) = crc; err = crypto_shash_update(shash, next, len); if (unlikely(err)) { - rxe_dbg(rxe, "failed crc calculation, err: %d\n", err); + rxe_dbg_dev(rxe, "failed crc calculation, err: %d\n", err); return (__force __be32)crc32_le((__force u32)crc, next, len); } diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index 1bb0cb479eb12d..804b15e929dd99 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -80,7 +80,6 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length); int advance_dma_data(struct rxe_dma_info *dma, unsigned int length); int rxe_invalidate_mr(struct rxe_qp *qp, u32 key); int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe); -int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); void rxe_mr_cleanup(struct rxe_pool_elem *elem); /* rxe_mw.c */ @@ -171,9 +170,9 @@ void rxe_srq_cleanup(struct rxe_pool_elem *elem); void rxe_dealloc(struct ib_device *ib_dev); -int rxe_completer(void *arg); -int rxe_requester(void *arg); -int rxe_responder(void *arg); +int rxe_completer(struct rxe_qp *qp); +int rxe_requester(struct rxe_qp *qp); +int rxe_responder(struct rxe_qp *qp); /* rxe_icrc.c */ int rxe_icrc_init(struct rxe_dev *rxe); diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c index a47d72dbc5376d..6b7f2bd6987991 100644 --- a/drivers/infiniband/sw/rxe/rxe_mmap.c +++ b/drivers/infiniband/sw/rxe/rxe_mmap.c @@ -79,7 +79,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) /* Don't allow a mmap larger than the object. 
*/ if (size > ip->info.size) { - rxe_dbg(rxe, "mmap region is larger than the object!\n"); + rxe_dbg_dev(rxe, "mmap region is larger than the object!\n"); spin_unlock_bh(&rxe->pending_lock); ret = -EINVAL; goto done; @@ -87,7 +87,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) goto found_it; } - rxe_dbg(rxe, "unable to find pending mmap info\n"); + rxe_dbg_dev(rxe, "unable to find pending mmap info\n"); spin_unlock_bh(&rxe->pending_lock); ret = -EINVAL; goto done; @@ -98,7 +98,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) ret = remap_vmalloc_range(vma, ip->obj, 0); if (ret) { - rxe_dbg(rxe, "err %d from remap_vmalloc_range\n", ret); + rxe_dbg_dev(rxe, "err %d from remap_vmalloc_range\n", ret); goto done; } diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index b10aa1580a644a..0e538fafcc20ff 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -210,10 +210,10 @@ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr) return err; } -static int rxe_set_page(struct ib_mr *ibmr, u64 iova) +static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr) { struct rxe_mr *mr = to_rmr(ibmr); - struct page *page = virt_to_page(iova & mr->page_mask); + struct page *page = ib_virt_dma_to_page(dma_addr); bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT); int err; @@ -279,16 +279,16 @@ static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr, return 0; } -static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr, +static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr, unsigned int length, enum rxe_mr_copy_dir dir) { - unsigned int page_offset = iova & (PAGE_SIZE - 1); + unsigned int page_offset = dma_addr & (PAGE_SIZE - 1); unsigned int bytes; struct page *page; u8 *va; while (length) { - page = virt_to_page(iova & mr->page_mask); + page = ib_virt_dma_to_page(dma_addr); bytes = min_t(unsigned int, length, PAGE_SIZE - page_offset); va = kmap_local_page(page); @@ -300,7 +300,7 @@ static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr, kunmap_local(va); page_offset = 0; - iova += bytes; + dma_addr += bytes; addr += bytes; length -= bytes; } @@ -488,7 +488,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, if (mr->ibmr.type == IB_MR_TYPE_DMA) { page_offset = iova & (PAGE_SIZE - 1); - page = virt_to_page(iova & PAGE_MASK); + page = ib_virt_dma_to_page(iova); } else { unsigned long index; int err; @@ -545,7 +545,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value) if (mr->ibmr.type == IB_MR_TYPE_DMA) { page_offset = iova & (PAGE_SIZE - 1); - page = virt_to_page(iova & PAGE_MASK); + page = ib_virt_dma_to_page(iova); } else { unsigned long index; int err; @@ -722,19 +722,6 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe) return 0; } -int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) -{ - struct rxe_mr *mr = to_rmr(ibmr); - - /* See IBA 10.6.7.2.6 */ - if (atomic_read(&mr->num_mw) > 0) - return -EINVAL; - - rxe_cleanup(mr); - kfree_rcu(mr); - return 0; -} - void rxe_mr_cleanup(struct rxe_pool_elem *elem) { struct rxe_mr *mr = container_of(elem, typeof(*mr), elem); diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index e02e1624bcf4db..2bc7361152ea70 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -413,11 +413,14 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info 
*pkt, int is_request = pkt->mask & RXE_REQ_MASK; struct rxe_dev *rxe = to_rdev(qp->ibqp.device); - if ((is_request && (qp->req.state != QP_STATE_READY)) || - (!is_request && (qp->resp.state != QP_STATE_READY))) { + spin_lock_bh(&qp->state_lock); + if ((is_request && (qp_state(qp) < IB_QPS_RTS)) || + (!is_request && (qp_state(qp) < IB_QPS_RTR))) { + spin_unlock_bh(&qp->state_lock); rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n"); goto drop; } + spin_unlock_bh(&qp->state_lock); rxe_icrc_generate(skb, pkt); @@ -596,7 +599,7 @@ static int rxe_notify(struct notifier_block *not_blk, rxe_port_down(rxe); break; case NETDEV_CHANGEMTU: - rxe_dbg(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu); + rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu); rxe_set_mtu(rxe, ndev->mtu); break; case NETDEV_CHANGE: @@ -608,7 +611,7 @@ static int rxe_notify(struct notifier_block *not_blk, case NETDEV_CHANGENAME: case NETDEV_FEAT_CHANGE: default: - rxe_dbg(rxe, "ignoring netdev event = %ld for %s\n", + rxe_dbg_dev(rxe, "ignoring netdev event = %ld for %s\n", event, ndev->name); break; } diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index ab72db68b58f69..c5451a4488ca35 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -19,33 +19,33 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap, int has_srq) { if (cap->max_send_wr > rxe->attr.max_qp_wr) { - rxe_dbg(rxe, "invalid send wr = %u > %d\n", + rxe_dbg_dev(rxe, "invalid send wr = %u > %d\n", cap->max_send_wr, rxe->attr.max_qp_wr); goto err1; } if (cap->max_send_sge > rxe->attr.max_send_sge) { - rxe_dbg(rxe, "invalid send sge = %u > %d\n", + rxe_dbg_dev(rxe, "invalid send sge = %u > %d\n", cap->max_send_sge, rxe->attr.max_send_sge); goto err1; } if (!has_srq) { if (cap->max_recv_wr > rxe->attr.max_qp_wr) { - rxe_dbg(rxe, "invalid recv wr = %u > %d\n", + rxe_dbg_dev(rxe, "invalid recv wr = %u > %d\n", cap->max_recv_wr, rxe->attr.max_qp_wr); goto err1; } if (cap->max_recv_sge > rxe->attr.max_recv_sge) { - rxe_dbg(rxe, "invalid recv sge = %u > %d\n", + rxe_dbg_dev(rxe, "invalid recv sge = %u > %d\n", cap->max_recv_sge, rxe->attr.max_recv_sge); goto err1; } } if (cap->max_inline_data > rxe->max_inline_data) { - rxe_dbg(rxe, "invalid max inline data = %u > %d\n", + rxe_dbg_dev(rxe, "invalid max inline data = %u > %d\n", cap->max_inline_data, rxe->max_inline_data); goto err1; } @@ -73,7 +73,7 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init) } if (!init->recv_cq || !init->send_cq) { - rxe_dbg(rxe, "missing cq\n"); + rxe_dbg_dev(rxe, "missing cq\n"); goto err1; } @@ -82,14 +82,14 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init) if (init->qp_type == IB_QPT_GSI) { if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) { - rxe_dbg(rxe, "invalid port = %d\n", port_num); + rxe_dbg_dev(rxe, "invalid port = %d\n", port_num); goto err1; } port = &rxe->port; if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) { - rxe_dbg(rxe, "GSI QP exists for port %d\n", port_num); + rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num); goto err1; } } @@ -231,8 +231,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, qp->req.wqe_index = queue_get_producer(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT); - qp->req.state = QP_STATE_RESET; - qp->comp.state = QP_STATE_RESET; qp->req.opcode = -1; qp->comp.opcode = -1; @@ -287,7 +285,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, 
qp->resp.opcode = OPCODE_NONE; qp->resp.msn = 0; - qp->resp.state = QP_STATE_RESET; return 0; } @@ -328,8 +325,10 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, if (err) goto err2; + spin_lock_bh(&qp->state_lock); qp->attr.qp_state = IB_QPS_RESET; qp->valid = 1; + spin_unlock_bh(&qp->state_lock); return 0; @@ -380,30 +379,9 @@ int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init) return 0; } -/* called by the modify qp verb, this routine checks all the parameters before - * making any changes - */ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) { - enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ? - attr->cur_qp_state : qp->attr.qp_state; - enum ib_qp_state new_state = (mask & IB_QP_STATE) ? - attr->qp_state : cur_state; - - if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) { - rxe_dbg_qp(qp, "invalid mask or state\n"); - goto err1; - } - - if (mask & IB_QP_STATE) { - if (cur_state == IB_QPS_SQD) { - if (qp->req.state == QP_STATE_DRAIN && - new_state != IB_QPS_ERR) - goto err1; - } - } - if (mask & IB_QP_PORT) { if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) { rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num); @@ -473,29 +451,18 @@ static void rxe_qp_reset(struct rxe_qp *qp) { /* stop tasks from running */ rxe_disable_task(&qp->resp.task); + rxe_disable_task(&qp->comp.task); + rxe_disable_task(&qp->req.task); - /* stop request/comp */ - if (qp->sq.queue) { - if (qp_type(qp) == IB_QPT_RC) - rxe_disable_task(&qp->comp.task); - rxe_disable_task(&qp->req.task); - } - - /* move qp to the reset state */ - qp->req.state = QP_STATE_RESET; - qp->comp.state = QP_STATE_RESET; - qp->resp.state = QP_STATE_RESET; + /* drain work and packet queues */ + rxe_requester(qp); + rxe_completer(qp); + rxe_responder(qp); - /* let state machines reset themselves drain work and packet queues * etc.
- */ - __rxe_do_task(&qp->resp.task); - - if (qp->sq.queue) { - __rxe_do_task(&qp->comp.task); - __rxe_do_task(&qp->req.task); + if (qp->rq.queue) + rxe_queue_reset(qp->rq.queue); + if (qp->sq.queue) rxe_queue_reset(qp->sq.queue); - } /* cleanup attributes */ atomic_set(&qp->ssn, 0); @@ -518,54 +485,103 @@ static void rxe_qp_reset(struct rxe_qp *qp) /* reenable tasks */ rxe_enable_task(&qp->resp.task); - - if (qp->sq.queue) { - if (qp_type(qp) == IB_QPT_RC) - rxe_enable_task(&qp->comp.task); - - rxe_enable_task(&qp->req.task); - } -} - -/* drain the send queue */ -static void rxe_qp_drain(struct rxe_qp *qp) -{ - if (qp->sq.queue) { - if (qp->req.state != QP_STATE_DRAINED) { - qp->req.state = QP_STATE_DRAIN; - if (qp_type(qp) == IB_QPT_RC) - rxe_sched_task(&qp->comp.task); - else - __rxe_do_task(&qp->comp.task); - rxe_sched_task(&qp->req.task); - } - } + rxe_enable_task(&qp->comp.task); + rxe_enable_task(&qp->req.task); } /* move the qp to the error state */ void rxe_qp_error(struct rxe_qp *qp) { - qp->req.state = QP_STATE_ERROR; - qp->resp.state = QP_STATE_ERROR; - qp->comp.state = QP_STATE_ERROR; + spin_lock_bh(&qp->state_lock); qp->attr.qp_state = IB_QPS_ERR; /* drain work and packet queues */ rxe_sched_task(&qp->resp.task); + rxe_sched_task(&qp->comp.task); + rxe_sched_task(&qp->req.task); + spin_unlock_bh(&qp->state_lock); +} - if (qp_type(qp) == IB_QPT_RC) - rxe_sched_task(&qp->comp.task); - else - __rxe_do_task(&qp->comp.task); +static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr, + int mask) +{ + spin_lock_bh(&qp->state_lock); + qp->attr.sq_draining = 1; + rxe_sched_task(&qp->comp.task); rxe_sched_task(&qp->req.task); + spin_unlock_bh(&qp->state_lock); } +/* caller should hold qp->state_lock */ +static int __qp_chk_state(struct rxe_qp *qp, struct ib_qp_attr *attr, + int mask) +{ + enum ib_qp_state cur_state; + enum ib_qp_state new_state; + + cur_state = (mask & IB_QP_CUR_STATE) ? + attr->cur_qp_state : qp->attr.qp_state; + new_state = (mask & IB_QP_STATE) ? + attr->qp_state : cur_state; + + if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) + return -EINVAL; + + if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD) { + if (qp->attr.sq_draining && new_state != IB_QPS_ERR) + return -EINVAL; + } + + return 0; +} + +static const char *const qps2str[] = { + [IB_QPS_RESET] = "RESET", + [IB_QPS_INIT] = "INIT", + [IB_QPS_RTR] = "RTR", + [IB_QPS_RTS] = "RTS", + [IB_QPS_SQD] = "SQD", + [IB_QPS_SQE] = "SQE", + [IB_QPS_ERR] = "ERR", +}; + /* called by the modify qp verb */ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, struct ib_udata *udata) { int err; + if (mask & IB_QP_CUR_STATE) + qp->attr.cur_qp_state = attr->qp_state; + + if (mask & IB_QP_STATE) { + spin_lock_bh(&qp->state_lock); + err = __qp_chk_state(qp, attr, mask); + if (!err) { + qp->attr.qp_state = attr->qp_state; + rxe_dbg_qp(qp, "state -> %s\n", + qps2str[attr->qp_state]); + } + spin_unlock_bh(&qp->state_lock); + + if (err) + return err; + + switch (attr->qp_state) { + case IB_QPS_RESET: + rxe_qp_reset(qp); + break; + case IB_QPS_SQD: + rxe_qp_sqd(qp, attr, mask); + break; + case IB_QPS_ERR: + rxe_qp_error(qp); + break; + default: + break; + } + } + if (mask & IB_QP_MAX_QP_RD_ATOMIC) { int max_rd_atomic = attr->max_rd_atomic ? 
roundup_pow_of_two(attr->max_rd_atomic) : 0; @@ -587,9 +603,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, return err; } - if (mask & IB_QP_CUR_STATE) - qp->attr.cur_qp_state = attr->qp_state; - if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY) qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify; @@ -669,50 +682,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, if (mask & IB_QP_DEST_QPN) qp->attr.dest_qp_num = attr->dest_qp_num; - if (mask & IB_QP_STATE) { - qp->attr.qp_state = attr->qp_state; - - switch (attr->qp_state) { - case IB_QPS_RESET: - rxe_dbg_qp(qp, "state -> RESET\n"); - rxe_qp_reset(qp); - break; - - case IB_QPS_INIT: - rxe_dbg_qp(qp, "state -> INIT\n"); - qp->req.state = QP_STATE_INIT; - qp->resp.state = QP_STATE_INIT; - qp->comp.state = QP_STATE_INIT; - break; - - case IB_QPS_RTR: - rxe_dbg_qp(qp, "state -> RTR\n"); - qp->resp.state = QP_STATE_READY; - break; - - case IB_QPS_RTS: - rxe_dbg_qp(qp, "state -> RTS\n"); - qp->req.state = QP_STATE_READY; - qp->comp.state = QP_STATE_READY; - break; - - case IB_QPS_SQD: - rxe_dbg_qp(qp, "state -> SQD\n"); - rxe_qp_drain(qp); - break; - - case IB_QPS_SQE: - rxe_dbg_qp(qp, "state -> SQE !!?\n"); - /* Not possible from modify_qp. */ - break; - - case IB_QPS_ERR: - rxe_dbg_qp(qp, "state -> ERR\n"); - rxe_qp_error(qp); - break; - } - } - return 0; } @@ -736,18 +705,16 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) rxe_av_to_attr(&qp->pri_av, &attr->ah_attr); rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr); - if (qp->req.state == QP_STATE_DRAIN) { - attr->sq_draining = 1; - /* applications that get this state - * typically spin on it. yield the - * processor - */ + /* Applications that get this state typically spin on it. + * Yield the processor + */ + spin_lock_bh(&qp->state_lock); + if (qp->attr.sq_draining) { + spin_unlock_bh(&qp->state_lock); cond_resched(); - } else { - attr->sq_draining = 0; + } else { + spin_unlock_bh(&qp->state_lock); } - - rxe_dbg_qp(qp, "attr->sq_draining = %d\n", attr->sq_draining); return 0; } @@ -771,26 +737,29 @@ static void rxe_qp_do_cleanup(struct work_struct *work) { struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work); + spin_lock_bh(&qp->state_lock); qp->valid = 0; + spin_unlock_bh(&qp->state_lock); qp->qp_timeout_jiffies = 0; - rxe_cleanup_task(&qp->resp.task); if (qp_type(qp) == IB_QPT_RC) { del_timer_sync(&qp->retrans_timer); del_timer_sync(&qp->rnr_nak_timer); } - rxe_cleanup_task(&qp->req.task); - rxe_cleanup_task(&qp->comp.task); + if (qp->resp.task.func) + rxe_cleanup_task(&qp->resp.task); - /* flush out any receive wr's or pending requests */ if (qp->req.task.func) - __rxe_do_task(&qp->req.task); + rxe_cleanup_task(&qp->req.task); - if (qp->sq.queue) { - __rxe_do_task(&qp->comp.task); - __rxe_do_task(&qp->req.task); - } + if (qp->comp.task.func) + rxe_cleanup_task(&qp->comp.task); + + /* flush out any receive wr's or pending requests */ + rxe_requester(qp); + rxe_completer(qp); + rxe_responder(qp); if (qp->sq.queue) rxe_queue_cleanup(qp->sq.queue); diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c index d6dbf5a0058dcc..9611ee191a46dc 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.c +++ b/drivers/infiniband/sw/rxe/rxe_queue.c @@ -61,11 +61,11 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem, /* num_elem == 0 is allowed, but uninteresting */ if (*num_elem < 0) - goto err1; + return NULL; q = kzalloc(sizeof(*q), GFP_KERNEL); if (!q) - goto err1; + return NULL; q->rxe = rxe; q->type = type; @@ -100,7 +100,6 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem, err2: kfree(q); -err1: return NULL; } diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index 434a693cd4a5a7..2f953cc74256d9 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -38,12 +38,19 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, return -EINVAL; } + spin_lock_bh(&qp->state_lock); if (pkt->mask & RXE_REQ_MASK) { - if (unlikely(qp->resp.state != QP_STATE_READY)) + if (unlikely(qp_state(qp) < IB_QPS_RTR)) { + spin_unlock_bh(&qp->state_lock); return -EINVAL; - } else if (unlikely(qp->req.state < QP_STATE_READY || - qp->req.state > QP_STATE_DRAINED)) - return -EINVAL; + } + } else { + if (unlikely(qp_state(qp) < IB_QPS_RTS)) { + spin_unlock_bh(&qp->state_lock); + return -EINVAL; + } + } + spin_unlock_bh(&qp->state_lock); return 0; } diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 899c8779f8001c..65134a9aefe7bf 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -102,44 +102,44 @@ void rnr_nak_timer(struct timer_list *t) rxe_dbg_qp(qp, "nak timer fired\n"); - /* request a send queue retry */ - qp->req.need_retry = 1; - qp->req.wait_for_rnr_timer = 0; - rxe_sched_task(&qp->req.task); + spin_lock_bh(&qp->state_lock); + if (qp->valid) { + /* request a send queue retry */ + qp->req.need_retry = 1; + qp->req.wait_for_rnr_timer = 0; + rxe_sched_task(&qp->req.task); + } + spin_unlock_bh(&qp->state_lock); } -static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) +static void req_check_sq_drain_done(struct rxe_qp *qp) { - struct
rxe_send_wqe *wqe; - struct rxe_queue *q = qp->sq.queue; - unsigned int index = qp->req.wqe_index; + struct rxe_queue *q; + unsigned int index; unsigned int cons; - unsigned int prod; + struct rxe_send_wqe *wqe; - wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT); - cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT); - prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT); + spin_lock_bh(&qp->state_lock); + if (qp_state(qp) == IB_QPS_SQD) { + q = qp->sq.queue; + index = qp->req.wqe_index; + cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT); + wqe = queue_addr_from_index(q, cons); - if (unlikely(qp->req.state == QP_STATE_DRAIN)) { /* check to see if we are drained; * state_lock used by requester and completer */ - spin_lock_bh(&qp->state_lock); do { - if (qp->req.state != QP_STATE_DRAIN) { + if (!qp->attr.sq_draining) /* comp just finished */ - spin_unlock_bh(&qp->state_lock); break; - } if (wqe && ((index != cons) || - (wqe->state != wqe_state_posted))) { + (wqe->state != wqe_state_posted))) /* comp not done yet */ - spin_unlock_bh(&qp->state_lock); break; - } - qp->req.state = QP_STATE_DRAINED; + qp->attr.sq_draining = 0; spin_unlock_bh(&qp->state_lock); if (qp->ibqp.event_handler) { @@ -151,18 +151,42 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } + return; } while (0); } + spin_unlock_bh(&qp->state_lock); +} + +static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp) +{ + struct rxe_queue *q = qp->sq.queue; + unsigned int index = qp->req.wqe_index; + unsigned int prod; + prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT); if (index == prod) return NULL; + else + return queue_addr_from_index(q, index); +} + +static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) +{ + struct rxe_send_wqe *wqe; + + req_check_sq_drain_done(qp); - wqe = queue_addr_from_index(q, index); + wqe = __req_next_wqe(qp); + if (wqe == NULL) + return NULL; - if (unlikely((qp->req.state == QP_STATE_DRAIN || - qp->req.state == QP_STATE_DRAINED) && - (wqe->state != wqe_state_processing))) + spin_lock_bh(&qp->state_lock); + if (unlikely((qp_state(qp) == IB_QPS_SQD) && + (wqe->state != wqe_state_processing))) { + spin_unlock_bh(&qp->state_lock); return NULL; + } + spin_unlock_bh(&qp->state_lock); wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp); return wqe; @@ -635,9 +659,8 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe) return 0; } -int rxe_requester(void *arg) +int rxe_requester(struct rxe_qp *qp) { - struct rxe_qp *qp = (struct rxe_qp *)arg; struct rxe_dev *rxe = to_rdev(qp->ibqp.device); struct rxe_pkt_info pkt; struct sk_buff *skb; @@ -654,24 +677,22 @@ int rxe_requester(void *arg) struct rxe_ah *ah; struct rxe_av *av; - if (!rxe_get(qp)) - return -EAGAIN; - - if (unlikely(!qp->valid)) + spin_lock_bh(&qp->state_lock); + if (unlikely(!qp->valid)) { + spin_unlock_bh(&qp->state_lock); goto exit; + } - if (unlikely(qp->req.state == QP_STATE_ERROR)) { - wqe = req_next_wqe(qp); + if (unlikely(qp_state(qp) == IB_QPS_ERR)) { + wqe = __req_next_wqe(qp); + spin_unlock_bh(&qp->state_lock); if (wqe) - /* - * Generate an error completion for error qp state - */ goto err; else goto exit; } - if (unlikely(qp->req.state == QP_STATE_RESET)) { + if (unlikely(qp_state(qp) == IB_QPS_RESET)) { qp->req.wqe_index = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT); qp->req.opcode = -1; @@ -679,8 +700,10 @@ int rxe_requester(void *arg) qp->req.wait_psn = 0; qp->req.need_retry = 0; qp->req.wait_for_rnr_timer = 0; + 
spin_unlock_bh(&qp->state_lock); goto exit; } + spin_unlock_bh(&qp->state_lock); /* we come here if the retransmit timer has fired * or if the rnr timer has fired. If the retransmit @@ -757,7 +780,7 @@ int rxe_requester(void *arg) qp->req.wqe_index); wqe->state = wqe_state_done; wqe->status = IB_WC_SUCCESS; - rxe_run_task(&qp->comp.task); + rxe_sched_task(&qp->comp.task); goto done; } payload = mtu; @@ -840,12 +863,9 @@ int rxe_requester(void *arg) /* update wqe_index for each wqe completion */ qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index); wqe->state = wqe_state_error; - qp->req.state = QP_STATE_ERROR; - rxe_run_task(&qp->comp.task); + rxe_qp_error(qp); exit: ret = -EAGAIN; out: - rxe_put(qp); - return ret; } diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 0cc1ba91d48cc8..68f6cd188d8ed2 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -42,7 +42,6 @@ static char *resp_state_name[] = { [RESPST_ERR_LENGTH] = "ERR_LENGTH", [RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW", [RESPST_ERROR] = "ERROR", - [RESPST_RESET] = "RESET", [RESPST_DONE] = "DONE", [RESPST_EXIT] = "EXIT", }; @@ -69,17 +68,6 @@ static inline enum resp_states get_req(struct rxe_qp *qp, { struct sk_buff *skb; - if (qp->resp.state == QP_STATE_ERROR) { - while ((skb = skb_dequeue(&qp->req_pkts))) { - rxe_put(qp); - kfree_skb(skb); - ib_device_put(qp->ibqp.device); - } - - /* go drain recv wr queue */ - return RESPST_CHK_RESOURCE; - } - skb = skb_peek(&qp->req_pkts); if (!skb) return RESPST_EXIT; @@ -334,24 +322,6 @@ static enum resp_states check_resource(struct rxe_qp *qp, { struct rxe_srq *srq = qp->srq; - if (qp->resp.state == QP_STATE_ERROR) { - if (qp->resp.wqe) { - qp->resp.status = IB_WC_WR_FLUSH_ERR; - return RESPST_COMPLETE; - } else if (!srq) { - qp->resp.wqe = queue_head(qp->rq.queue, - QUEUE_TYPE_FROM_CLIENT); - if (qp->resp.wqe) { - qp->resp.status = IB_WC_WR_FLUSH_ERR; - return RESPST_COMPLETE; - } else { - return RESPST_EXIT; - } - } else { - return RESPST_EXIT; - } - } - if (pkt->mask & (RXE_READ_OR_ATOMIC_MASK | RXE_ATOMIC_WRITE_MASK)) { /* it is the requesters job to not send * too many read/atomic ops, we just @@ -1151,6 +1121,10 @@ static enum resp_states do_complete(struct rxe_qp *qp, wc->port_num = qp->attr.port_num; } + } else { + if (wc->status != IB_WC_WR_FLUSH_ERR) + rxe_err_qp(qp, "non-flush error status = %d", + wc->status); } /* have copy for srq and reference for !srq */ @@ -1163,8 +1137,13 @@ static enum resp_states do_complete(struct rxe_qp *qp, return RESPST_ERR_CQ_OVERFLOW; finish: - if (unlikely(qp->resp.state == QP_STATE_ERROR)) + spin_lock_bh(&qp->state_lock); + if (unlikely(qp_state(qp) == IB_QPS_ERR)) { + spin_unlock_bh(&qp->state_lock); return RESPST_CHK_RESOURCE; + } + spin_unlock_bh(&qp->state_lock); + if (unlikely(!pkt)) return RESPST_DONE; if (qp_type(qp) == IB_QPT_RC) @@ -1421,49 +1400,90 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp) } } -static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) +/* drain incoming request packet queue */ +static void drain_req_pkts(struct rxe_qp *qp) { struct sk_buff *skb; - struct rxe_queue *q = qp->rq.queue; while ((skb = skb_dequeue(&qp->req_pkts))) { rxe_put(qp); kfree_skb(skb); ib_device_put(qp->ibqp.device); } +} + +/* complete receive wqe with flush error */ +static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe) +{ + struct rxe_cqe cqe = {}; + struct ib_wc *wc = &cqe.ibwc; + struct ib_uverbs_wc 
*uwc = &cqe.uibwc; + int err; + + if (qp->rcq->is_user) { + uwc->wr_id = wqe->wr_id; + uwc->status = IB_WC_WR_FLUSH_ERR; + uwc->qp_num = qp_num(qp); + } else { + wc->wr_id = wqe->wr_id; + wc->status = IB_WC_WR_FLUSH_ERR; + wc->qp = &qp->ibqp; + } - if (notify) + err = rxe_cq_post(qp->rcq, &cqe, 0); + if (err) + rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err); + + return err; +} + +/* drain and optionally complete the receive queue. + * If unable to complete a wqe, stop completing and + * just flush the remaining wqes. + */ +static void flush_recv_queue(struct rxe_qp *qp, bool notify) +{ + struct rxe_queue *q = qp->rq.queue; + struct rxe_recv_wqe *wqe; + int err; + + if (qp->srq) return; - while (!qp->srq && q && queue_head(q, q->type)) + while ((wqe = queue_head(q, q->type))) { + if (notify) { + err = flush_recv_wqe(qp, wqe); + if (err) + notify = 0; + } queue_advance_consumer(q, q->type); + } + + qp->resp.wqe = NULL; } -int rxe_responder(void *arg) +int rxe_responder(struct rxe_qp *qp) { - struct rxe_qp *qp = (struct rxe_qp *)arg; struct rxe_dev *rxe = to_rdev(qp->ibqp.device); enum resp_states state; struct rxe_pkt_info *pkt = NULL; int ret; - if (!rxe_get(qp)) - return -EAGAIN; + spin_lock_bh(&qp->state_lock); + if (!qp->valid || qp_state(qp) == IB_QPS_ERR || + qp_state(qp) == IB_QPS_RESET) { + bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR); - qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED; - - if (!qp->valid) + drain_req_pkts(qp); + flush_recv_queue(qp, notify); + spin_unlock_bh(&qp->state_lock); goto exit; + } + spin_unlock_bh(&qp->state_lock); - switch (qp->resp.state) { - case QP_STATE_RESET: - state = RESPST_RESET; - break; + qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED; - default: - state = RESPST_GET_REQ; - break; - } + state = RESPST_GET_REQ; while (1) { rxe_dbg_qp(qp, "state = %s\n", resp_state_name[state]); @@ -1622,11 +1642,6 @@ int rxe_responder(void *arg) goto exit; - case RESPST_RESET: - rxe_drain_req_pkts(qp, false); - qp->resp.wqe = NULL; - goto exit; - case RESPST_ERROR: qp->resp.goto_error = 0; rxe_dbg_qp(qp, "moved to error state\n"); @@ -1648,6 +1663,5 @@ int rxe_responder(void *arg) exit: ret = -EAGAIN; out: - rxe_put(qp); return ret; } diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c index 82e37a41ced40d..27ca82ec0826b9 100644 --- a/drivers/infiniband/sw/rxe/rxe_srq.c +++ b/drivers/infiniband/sw/rxe/rxe_srq.c @@ -13,13 +13,13 @@ int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init) struct ib_srq_attr *attr = &init->attr; if (attr->max_wr > rxe->attr.max_srq_wr) { - rxe_dbg(rxe, "max_wr(%d) > max_srq_wr(%d)\n", + rxe_dbg_dev(rxe, "max_wr(%d) > max_srq_wr(%d)\n", attr->max_wr, rxe->attr.max_srq_wr); goto err1; } if (attr->max_wr <= 0) { - rxe_dbg(rxe, "max_wr(%d) <= 0\n", attr->max_wr); + rxe_dbg_dev(rxe, "max_wr(%d) <= 0\n", attr->max_wr); goto err1; } @@ -27,7 +27,7 @@ int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init) attr->max_wr = RXE_MIN_SRQ_WR; if (attr->max_sge > rxe->attr.max_srq_sge) { - rxe_dbg(rxe, "max_sge(%d) > max_srq_sge(%d)\n", + rxe_dbg_dev(rxe, "max_sge(%d) > max_srq_sge(%d)\n", attr->max_sge, rxe->attr.max_srq_sge); goto err1; }
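As an aside, the flush semantics implemented in the responder changes above are visible to a userspace verbs consumer as IB_WC_WR_FLUSH_ERR completions once the QP moves to the error state. A minimal sketch of that consumer-side view, assuming libibverbs and an already-created CQ; the helper name is hypothetical and not part of this patch:

/* hypothetical helper, not part of this patch */
#include <stdio.h>
#include <infiniband/verbs.h>

static void drain_flushed_completions(struct ibv_cq *cq)
{
	struct ibv_wc wc;

	/* poll until the CQ is empty; flushed recv WQEs surface here */
	while (ibv_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status == IBV_WC_WR_FLUSH_ERR)
			printf("wr_id %llu flushed\n",
			       (unsigned long long)wc.wr_id);
		else if (wc.status != IBV_WC_SUCCESS)
			printf("wr_id %llu failed: %s\n",
			       (unsigned long long)wc.wr_id,
			       ibv_wc_status_str(wc.status));
	}
}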
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c index 60b90e33a88496..fb9a6bc8e62047 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.c +++ b/drivers/infiniband/sw/rxe/rxe_task.c @@ -6,69 +6,128 @@ #include "rxe.h" -int __rxe_do_task(struct rxe_task *task) +/* Check if task is idle, i.e. not running, not scheduled in + * the tasklet queue and not draining. If so, reserve a slot + * in do_task() by setting the state to busy and taking a qp + * reference to cover the gap from now until the task finishes. + * The state moves out of busy when the task returns a non-zero + * value in do_task(). If the state is already busy it is raised + * to armed to indicate to do_task that an additional pass should + * be made over the task. + * Context: caller should hold task->lock. + * Returns: true if state transitioned from idle to busy else false. + */ +static bool __reserve_if_idle(struct rxe_task *task) +{ + WARN_ON(rxe_read(task->qp) <= 0); + + if (task->tasklet.state & BIT(TASKLET_STATE_SCHED)) + return false; + + if (task->state == TASK_STATE_IDLE) { + rxe_get(task->qp); + task->state = TASK_STATE_BUSY; + task->num_sched++; + return true; + } + + if (task->state == TASK_STATE_BUSY) + task->state = TASK_STATE_ARMED; + return false; +} + +/* check if task is idle or drained and not currently + * scheduled in the tasklet queue. This routine is + * called by rxe_cleanup_task or rxe_disable_task to + * see if the queue is empty. + * Context: caller should hold task->lock. + * Returns true if done else false. + */ +static bool __is_done(struct rxe_task *task) { - int ret; + if (task->tasklet.state & BIT(TASKLET_STATE_SCHED)) + return false; - while ((ret = task->func(task->arg)) == 0) - ; + if (task->state == TASK_STATE_IDLE || + task->state == TASK_STATE_DRAINED) { + return true; + } - task->ret = ret; + return false; +} - return ret; +/* a locked version of __is_done */ +static bool is_done(struct rxe_task *task) +{ + unsigned long flags; + int done; + + spin_lock_irqsave(&task->lock, flags); + done = __is_done(task); + spin_unlock_irqrestore(&task->lock, flags); + + return done; } -/* - * this locking is due to a potential race where - * a second caller finds the task already running - * but looks just after the last call to func +/* do_task is a wrapper for the three tasks (requester, + * completer, responder) and calls them in a loop until + * they return a non-zero value. It is called either + * directly by rxe_run_task or indirectly if rxe_sched_task + * schedules the task. Callers must call __reserve_if_idle to + * move the task to busy before calling or scheduling it. + * The task can also be moved to drained or invalid + * by calls to rxe_cleanup_task or rxe_disable_task. + * In that case tasks which get here are not executed but + * just flushed. The tasks are designed to look to see if + * there is work to do and do part of it before returning + * here with a return value of zero until all the work + * has been consumed, then they return a non-zero value. + * The number of times the task can run is limited by + * max iterations so one task cannot hold the cpu forever.
 */ static void do_task(struct tasklet_struct *t) { int cont; int ret; struct rxe_task *task = from_tasklet(task, t, tasklet); - struct rxe_qp *qp = (struct rxe_qp *)task->arg; - unsigned int iterations = RXE_MAX_ITERATIONS; + unsigned int iterations; + unsigned long flags; + int resched = 0; - spin_lock_bh(&task->lock); - switch (task->state) { - case TASK_STATE_START: - task->state = TASK_STATE_BUSY; - spin_unlock_bh(&task->lock); - break; - - case TASK_STATE_BUSY: - task->state = TASK_STATE_ARMED; - fallthrough; - case TASK_STATE_ARMED: - spin_unlock_bh(&task->lock); - return; + WARN_ON(rxe_read(task->qp) <= 0); - default: - spin_unlock_bh(&task->lock); - rxe_dbg_qp(qp, "failed with bad state %d\n", task->state); + spin_lock_irqsave(&task->lock, flags); + if (task->state >= TASK_STATE_DRAINED) { + rxe_put(task->qp); + task->num_done++; + spin_unlock_irqrestore(&task->lock, flags); return; } + spin_unlock_irqrestore(&task->lock, flags); do { + iterations = RXE_MAX_ITERATIONS; cont = 0; - ret = task->func(task->arg); - spin_lock_bh(&task->lock); + do { + ret = task->func(task->qp); + } while (ret == 0 && iterations-- > 0); + + spin_lock_irqsave(&task->lock, flags); switch (task->state) { case TASK_STATE_BUSY: if (ret) { - task->state = TASK_STATE_START; - } else if (iterations--) { - cont = 1; + task->state = TASK_STATE_IDLE; } else { - /* reschedule the tasklet and exit + /* This can happen if the client + * can add work faster than the + * tasklet can finish it. + * Reschedule the tasklet and exit * the loop to give up the cpu */ - tasklet_schedule(&task->tasklet); - task->state = TASK_STATE_START; + task->state = TASK_STATE_IDLE; + resched = 1; } break; @@ -81,71 +140,158 @@ static void do_task(struct tasklet_struct *t) cont = 1; break; + case TASK_STATE_DRAINING: + if (ret) + task->state = TASK_STATE_DRAINED; + else + cont = 1; + break; + default: - rxe_dbg_qp(qp, "failed with bad state %d\n", - task->state); + WARN_ON(1); + rxe_info_qp(task->qp, "unexpected task state = %d", task->state); + } + + if (!cont) { + task->num_done++; + if (WARN_ON(task->num_done != task->num_sched)) + rxe_err_qp(task->qp, "%ld tasks scheduled, %ld tasks done", + task->num_sched, task->num_done); } - spin_unlock_bh(&task->lock); + spin_unlock_irqrestore(&task->lock, flags); } while (cont); task->ret = ret; + + if (resched) + rxe_sched_task(task); + + rxe_put(task->qp); } -int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *)) +int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp, + int (*func)(struct rxe_qp *)) { - task->arg = arg; - task->func = func; - task->destroyed = false; + WARN_ON(rxe_read(qp) <= 0); + + task->qp = qp; + task->func = func; tasklet_setup(&task->tasklet, do_task); - task->state = TASK_STATE_START; + task->state = TASK_STATE_IDLE; spin_lock_init(&task->lock); return 0; }
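The IDLE/BUSY/ARMED handshake above guarantees that a schedule request arriving while the task is already running is never lost: the second caller arms the task instead of running it, and the current owner makes another pass before going idle. A minimal single-threaded model of just that logic, with illustrative names (the real code additionally takes task->lock and a qp reference):

#include <stdio.h>

enum task_state { S_IDLE, S_BUSY, S_ARMED };
static enum task_state state = S_IDLE;

/* mirror of __reserve_if_idle(): returns 1 if the caller owns the task */
static int reserve_if_idle(void)
{
	if (state == S_IDLE) {
		state = S_BUSY;		/* reserve the slot */
		return 1;
	}
	if (state == S_BUSY)
		state = S_ARMED;	/* owner will make another pass */
	return 0;
}

/* mirror of do_task(): rerun the work while someone armed us */
static void run(void (*work)(void))
{
	do {
		work();
		if (state == S_ARMED) {
			state = S_BUSY;	/* consume the arm, go again */
			continue;
		}
		state = S_IDLE;
		return;
	} while (1);
}

static void hello(void) { puts("work pass"); }

int main(void)
{
	if (reserve_if_idle())
		run(hello);
	return 0;
}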
+/* rxe_cleanup_task is only called from rxe_do_qp_cleanup in + * process context. The qp is already completed with no + * remaining references. Once the queue is drained the + * task is moved to invalid and returns. The qp cleanup + * code then calls the task functions directly without + * using the task struct to drain any late arriving packets + * or work requests. + */ void rxe_cleanup_task(struct rxe_task *task) { - bool idle; + unsigned long flags; - /* - * Mark the task, then wait for it to finish. It might be - * running in a non-tasklet (direct call) context. - */ - task->destroyed = true; + spin_lock_irqsave(&task->lock, flags); + if (!__is_done(task) && task->state < TASK_STATE_DRAINED) { + task->state = TASK_STATE_DRAINING; + } else { + task->state = TASK_STATE_INVALID; + spin_unlock_irqrestore(&task->lock, flags); + return; + } + spin_unlock_irqrestore(&task->lock, flags); - do { - spin_lock_bh(&task->lock); - idle = (task->state == TASK_STATE_START); - spin_unlock_bh(&task->lock); - } while (!idle); + /* now the task cannot be scheduled or run, just wait + * for the previously scheduled tasks to finish. + */ + while (!is_done(task)) + cond_resched(); tasklet_kill(&task->tasklet); + + spin_lock_irqsave(&task->lock, flags); + task->state = TASK_STATE_INVALID; + spin_unlock_irqrestore(&task->lock, flags); } +/* run the task inline if it is currently idle; + * cannot call do_task holding the lock + */ void rxe_run_task(struct rxe_task *task) { - if (task->destroyed) - return; + unsigned long flags; + int run; + + WARN_ON(rxe_read(task->qp) <= 0); + + spin_lock_irqsave(&task->lock, flags); + run = __reserve_if_idle(task); + spin_unlock_irqrestore(&task->lock, flags); - do_task(&task->tasklet); + if (run) + do_task(&task->tasklet); } +/* schedule the task to run later as a tasklet. + * the tasklet_schedule call can be called holding + * the lock. + */ void rxe_sched_task(struct rxe_task *task) { - if (task->destroyed) - return; + unsigned long flags; - tasklet_schedule(&task->tasklet); + WARN_ON(rxe_read(task->qp) <= 0); + + spin_lock_irqsave(&task->lock, flags); + if (__reserve_if_idle(task)) + tasklet_schedule(&task->tasklet); + spin_unlock_irqrestore(&task->lock, flags); }
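One way to read the run/sched split just defined: rxe_run_task() may execute the work synchronously in the caller's context, so per the comments above it cannot be invoked while holding task->lock, whereas rxe_sched_task() only reserves the task and queues the tasklet, which is safe with the lock held or from atomic context. A hypothetical caller choosing between them (illustrative only, not part of this patch) might look like:

/* hypothetical helper, not part of this patch */
static void kick_requester(struct rxe_qp *qp, bool may_run_inline)
{
	if (may_run_inline)
		rxe_run_task(&qp->req.task);	/* may run the work now */
	else
		rxe_sched_task(&qp->req.task);	/* defer to the tasklet */
}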
+/* rxe_disable/enable_task are only called from + * rxe_modify_qp in process context. Task is moved + * to the drained state by do_task. + */ void rxe_disable_task(struct rxe_task *task) { + unsigned long flags; + + WARN_ON(rxe_read(task->qp) <= 0); + + spin_lock_irqsave(&task->lock, flags); + if (!__is_done(task) && task->state < TASK_STATE_DRAINED) { + task->state = TASK_STATE_DRAINING; + } else { + task->state = TASK_STATE_DRAINED; + spin_unlock_irqrestore(&task->lock, flags); + return; + } + spin_unlock_irqrestore(&task->lock, flags); + + while (!is_done(task)) + cond_resched(); + tasklet_disable(&task->tasklet); } void rxe_enable_task(struct rxe_task *task) { + unsigned long flags; + + WARN_ON(rxe_read(task->qp) <= 0); + + spin_lock_irqsave(&task->lock, flags); + if (task->state == TASK_STATE_INVALID) { + spin_unlock_irqrestore(&task->lock, flags); + return; + } + task->state = TASK_STATE_IDLE; tasklet_enable(&task->tasklet); + spin_unlock_irqrestore(&task->lock, flags); } diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h index 7b88129702ac6d..facb7c8e372957 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.h +++ b/drivers/infiniband/sw/rxe/rxe_task.h @@ -8,9 +8,12 @@ #define RXE_TASK_H enum { - TASK_STATE_START = 0, + TASK_STATE_IDLE = 0, TASK_STATE_BUSY = 1, TASK_STATE_ARMED = 2, + TASK_STATE_DRAINING = 3, + TASK_STATE_DRAINED = 4, + TASK_STATE_INVALID = 5, }; /* @@ -22,28 +25,24 @@ struct rxe_task { struct tasklet_struct tasklet; int state; spinlock_t lock; - void *arg; - int (*func)(void *arg); + struct rxe_qp *qp; + int (*func)(struct rxe_qp *qp); int ret; - bool destroyed; + long num_sched; + long num_done; }; /* * init rxe_task structure - * arg => parameter to pass to fcn + * qp => parameter to pass to func * func => function to call until it returns != 0 */ -int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *)); +int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp, + int (*func)(struct rxe_qp *)); /* cleanup task */ void rxe_cleanup_task(struct rxe_task *task); -/* - * raw call to func in loop without any checking - * can call when tasklets are disabled - */ -int __rxe_do_task(struct rxe_task *task); - void rxe_run_task(struct rxe_task *task); void rxe_sched_task(struct rxe_task *task); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index e14050a692766c..dea605b7f68335 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -12,31 +12,48 @@ #include "rxe_queue.h" #include "rxe_hw_counters.h" -static int rxe_query_device(struct ib_device *dev, +static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr); + +/* dev */ +static int rxe_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, - struct ib_udata *uhw) + struct ib_udata *udata) { - struct rxe_dev *rxe = to_rdev(dev); + struct rxe_dev *rxe = to_rdev(ibdev); + int err; - if (uhw->inlen || uhw->outlen) - return -EINVAL; + if (udata->inlen || udata->outlen) { + rxe_dbg_dev(rxe, "malformed udata"); + err = -EINVAL; + goto err_out; + } + + memcpy(attr, &rxe->attr, sizeof(*attr)); - *attr = rxe->attr; return 0; + +err_out: + rxe_err_dev(rxe, "returned err = %d", err); + return err; } -static int rxe_query_port(struct ib_device *dev, +static int rxe_query_port(struct ib_device *ibdev, u32 port_num, struct ib_port_attr *attr) { - struct rxe_dev *rxe = to_rdev(dev); - int rc; + struct rxe_dev *rxe = to_rdev(ibdev); + int err, ret; + + if (port_num != 1) { + err = -EINVAL; + rxe_dbg_dev(rxe, "bad port_num = %d", port_num); + goto err_out; + } - /* *attr being zeroed by the caller, avoid
zeroing it here */ - *attr = rxe->port.attr; + memcpy(attr, &rxe->port.attr, sizeof(*attr)); mutex_lock(&rxe->usdev_lock); - rc = ib_get_eth_speed(dev, port_num, &attr->active_speed, - &attr->active_width); + ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed, + &attr->active_width); if (attr->state == IB_PORT_ACTIVE) attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; @@ -47,27 +64,45 @@ static int rxe_query_port(struct ib_device *dev, mutex_unlock(&rxe->usdev_lock); - return rc; + return ret; + +err_out: + rxe_err_dev(rxe, "returned err = %d", err); + return err; } -static int rxe_query_pkey(struct ib_device *device, +static int rxe_query_pkey(struct ib_device *ibdev, u32 port_num, u16 index, u16 *pkey) { - if (index > 0) - return -EINVAL; + struct rxe_dev *rxe = to_rdev(ibdev); + int err; + + if (index != 0) { + err = -EINVAL; + rxe_dbg_dev(rxe, "bad pkey index = %d", index); + goto err_out; + } *pkey = IB_DEFAULT_PKEY_FULL; return 0; + +err_out: + rxe_err_dev(rxe, "returned err = %d", err); + return err; } -static int rxe_modify_device(struct ib_device *dev, +static int rxe_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *attr) { - struct rxe_dev *rxe = to_rdev(dev); + struct rxe_dev *rxe = to_rdev(ibdev); + int err; if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID | - IB_DEVICE_MODIFY_NODE_DESC)) - return -EOPNOTSUPP; + IB_DEVICE_MODIFY_NODE_DESC)) { + err = -EOPNOTSUPP; + rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask); + goto err_out; + } if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid); @@ -78,16 +113,33 @@ static int rxe_modify_device(struct ib_device *dev, } return 0; + +err_out: + rxe_err_dev(rxe, "returned err = %d", err); + return err; } -static int rxe_modify_port(struct ib_device *dev, - u32 port_num, int mask, struct ib_port_modify *attr) +static int rxe_modify_port(struct ib_device *ibdev, u32 port_num, + int mask, struct ib_port_modify *attr) { - struct rxe_dev *rxe = to_rdev(dev); + struct rxe_dev *rxe = to_rdev(ibdev); struct rxe_port *port; + int err; - port = &rxe->port; + if (port_num != 1) { + err = -EINVAL; + rxe_dbg_dev(rxe, "bad port_num = %d", port_num); + goto err_out; + } + //TODO is shutdown useful + if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) { + err = -EOPNOTSUPP; + rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask); + goto err_out; + } + + port = &rxe->port; port->attr.port_cap_flags |= attr->set_port_cap_mask; port->attr.port_cap_flags &= ~attr->clr_port_cap_mask; @@ -95,73 +147,125 @@ static int rxe_modify_port(struct ib_device *dev, port->attr.qkey_viol_cntr = 0; return 0; -} -static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev, - u32 port_num) -{ - return IB_LINK_LAYER_ETHERNET; +err_out: + rxe_err_dev(rxe, "returned err = %d", err); + return err; } -static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata) +static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev, + u32 port_num) { - struct rxe_dev *rxe = to_rdev(ibuc->device); - struct rxe_ucontext *uc = to_ruc(ibuc); + struct rxe_dev *rxe = to_rdev(ibdev); + int err; - return rxe_add_to_pool(&rxe->uc_pool, uc); -} + if (port_num != 1) { + err = -EINVAL; + rxe_dbg_dev(rxe, "bad port_num = %d", port_num); + goto err_out; + } -static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc) -{ - struct rxe_ucontext *uc = to_ruc(ibuc); + return IB_LINK_LAYER_ETHERNET; - rxe_cleanup(uc); +err_out: + rxe_err_dev(rxe, "returned err = %d", err); + return err; } -static int 
rxe_port_immutable(struct ib_device *dev, u32 port_num, +static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { + struct rxe_dev *rxe = to_rdev(ibdev); + struct ib_port_attr attr = {}; int err; - struct ib_port_attr attr; - immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; + if (port_num != 1) { + err = -EINVAL; + rxe_dbg_dev(rxe, "bad port_num = %d", port_num); + goto err_out; + } - err = ib_query_port(dev, port_num, &attr); + err = ib_query_port(ibdev, port_num, &attr); if (err) - return err; + goto err_out; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; immutable->max_mad_size = IB_MGMT_MAD_SIZE; return 0; + +err_out: + rxe_err_dev(rxe, "returned err = %d", err); + return err; +} + +/* uc */ +static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata) +{ + struct rxe_dev *rxe = to_rdev(ibuc->device); + struct rxe_ucontext *uc = to_ruc(ibuc); + int err; + + err = rxe_add_to_pool(&rxe->uc_pool, uc); + if (err) + rxe_err_dev(rxe, "unable to create uc"); + + return err; +} + +static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc) +{ + struct rxe_ucontext *uc = to_ruc(ibuc); + int err; + + err = rxe_cleanup(uc); + if (err) + rxe_err_uc(uc, "cleanup failed, err = %d", err); } +/* pd */ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_pd *pd = to_rpd(ibpd); + int err; - return rxe_add_to_pool(&rxe->pd_pool, pd); + err = rxe_add_to_pool(&rxe->pd_pool, pd); + if (err) { + rxe_dbg_dev(rxe, "unable to alloc pd"); + goto err_out; + } + + return 0; + +err_out: + rxe_err_dev(rxe, "returned err = %d", err); + return err; } static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct rxe_pd *pd = to_rpd(ibpd); + int err; + + err = rxe_cleanup(pd); + if (err) + rxe_err_pd(pd, "cleanup failed, err = %d", err); - rxe_cleanup(pd); return 0; } +/* ah */ static int rxe_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata) - { struct rxe_dev *rxe = to_rdev(ibah->device); struct rxe_ah *ah = to_rah(ibah); struct rxe_create_ah_resp __user *uresp = NULL; - int err; + int err, cleanup_err; if (udata) { /* test if new user provider */ @@ -174,16 +278,18 @@ static int rxe_create_ah(struct ib_ah *ibah, err = rxe_add_to_pool_ah(&rxe->ah_pool, ah, init_attr->flags & RDMA_CREATE_AH_SLEEPABLE); - if (err) - return err; + if (err) { + rxe_dbg_dev(rxe, "unable to create ah"); + goto err_out; + } /* create index > 0 */ ah->ah_num = ah->elem.index; err = rxe_ah_chk_attr(ah, init_attr->ah_attr); if (err) { - rxe_cleanup(ah); - return err; + rxe_dbg_ah(ah, "bad attr"); + goto err_cleanup; } if (uresp) { @@ -191,8 +297,9 @@ static int rxe_create_ah(struct ib_ah *ibah, err = copy_to_user(&uresp->ah_num, &ah->ah_num, sizeof(uresp->ah_num)); if (err) { - rxe_cleanup(ah); - return -EFAULT; + err = -EFAULT; + rxe_dbg_ah(ah, "unable to copy to user"); + goto err_cleanup; } } else if (ah->is_user) { /* only if old user provider */ @@ -203,19 +310,34 @@ static int rxe_create_ah(struct ib_ah *ibah, rxe_finalize(ah); return 0; + +err_cleanup: + cleanup_err = rxe_cleanup(ah); + if (cleanup_err) + rxe_err_ah(ah, "cleanup failed, err = %d", cleanup_err); +err_out: + rxe_err_ah(ah, "returned err = %d", err); + return err; } static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) { - int err; struct 
rxe_ah *ah = to_rah(ibah); + int err; err = rxe_ah_chk_attr(ah, attr); - if (err) - return err; + if (err) { + rxe_dbg_ah(ah, "bad attr"); + goto err_out; + } rxe_init_av(attr, &ah->av); + return 0; + +err_out: + rxe_err_ah(ah, "returned err = %d", err); + return err; } static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) @@ -225,92 +347,77 @@ static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) memset(attr, 0, sizeof(*attr)); attr->type = ibah->type; rxe_av_to_attr(&ah->av, attr); + return 0; } static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags) { struct rxe_ah *ah = to_rah(ibah); + int err; - rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE); - - return 0; -} - -static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) -{ - int i; - u32 length; - struct rxe_recv_wqe *recv_wqe; - int num_sge = ibwr->num_sge; - int full; - - full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP); - if (unlikely(full)) - return -ENOMEM; - - if (unlikely(num_sge > rq->max_sge)) - return -EINVAL; - - length = 0; - for (i = 0; i < num_sge; i++) - length += ibwr->sg_list[i].length; - - recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP); - recv_wqe->wr_id = ibwr->wr_id; - - memcpy(recv_wqe->dma.sge, ibwr->sg_list, - num_sge * sizeof(struct ib_sge)); - - recv_wqe->dma.length = length; - recv_wqe->dma.resid = length; - recv_wqe->dma.num_sge = num_sge; - recv_wqe->dma.cur_sge = 0; - recv_wqe->dma.sge_offset = 0; - - queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP); + err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE); + if (err) + rxe_err_ah(ah, "cleanup failed, err = %d", err); return 0; } +/* srq */ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init, struct ib_udata *udata) { - int err; struct rxe_dev *rxe = to_rdev(ibsrq->device); struct rxe_pd *pd = to_rpd(ibsrq->pd); struct rxe_srq *srq = to_rsrq(ibsrq); struct rxe_create_srq_resp __user *uresp = NULL; + int err, cleanup_err; if (udata) { - if (udata->outlen < sizeof(*uresp)) - return -EINVAL; + if (udata->outlen < sizeof(*uresp)) { + err = -EINVAL; + rxe_err_dev(rxe, "malformed udata"); + goto err_out; + } uresp = udata->outbuf; } - if (init->srq_type != IB_SRQT_BASIC) - return -EOPNOTSUPP; + if (init->srq_type != IB_SRQT_BASIC) { + err = -EOPNOTSUPP; + rxe_dbg_dev(rxe, "srq type = %d, not supported", + init->srq_type); + goto err_out; + } err = rxe_srq_chk_init(rxe, init); - if (err) - return err; + if (err) { + rxe_dbg_dev(rxe, "invalid init attributes"); + goto err_out; + } err = rxe_add_to_pool(&rxe->srq_pool, srq); - if (err) - return err; + if (err) { + rxe_dbg_dev(rxe, "unable to create srq, err = %d", err); + goto err_out; + } rxe_get(pd); srq->pd = pd; err = rxe_srq_from_init(rxe, srq, init, udata, uresp); - if (err) + if (err) { + rxe_dbg_srq(srq, "create srq failed, err = %d", err); goto err_cleanup; + } return 0; err_cleanup: - rxe_cleanup(srq); - + cleanup_err = rxe_cleanup(srq); + if (cleanup_err) + rxe_err_srq(srq, "cleanup failed, err = %d", cleanup_err); +err_out: + rxe_err_dev(rxe, "returned err = %d", err); return err; } @@ -318,46 +425,64 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, struct ib_udata *udata) { - int err; struct rxe_srq *srq = to_rsrq(ibsrq); struct rxe_dev *rxe = to_rdev(ibsrq->device); - struct rxe_modify_srq_cmd ucmd = {}; + struct rxe_modify_srq_cmd cmd = {}; + int err; if (udata) { - if (udata->inlen < sizeof(ucmd)) - return -EINVAL; + if (udata->inlen 
< sizeof(cmd)) { + err = -EINVAL; + rxe_dbg_srq(srq, "malformed udata"); + goto err_out; + } - err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); - if (err) - return err; + err = ib_copy_from_udata(&cmd, udata, sizeof(cmd)); + if (err) { + err = -EFAULT; + rxe_dbg_srq(srq, "unable to read udata"); + goto err_out; + } } err = rxe_srq_chk_attr(rxe, srq, attr, mask); - if (err) - return err; + if (err) { + rxe_dbg_srq(srq, "bad init attributes"); + goto err_out; + } + + err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata); + if (err) { + rxe_dbg_srq(srq, "bad attr"); + goto err_out; + } - return rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata); + return 0; + +err_out: + rxe_err_srq(srq, "returned err = %d", err); + return err; } static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) { struct rxe_srq *srq = to_rsrq(ibsrq); + int err; - if (srq->error) - return -EINVAL; + if (srq->error) { + err = -EINVAL; + rxe_dbg_srq(srq, "srq in error state"); + goto err_out; + } attr->max_wr = srq->rq.queue->buf->index_mask; attr->max_sge = srq->rq.max_sge; attr->srq_limit = srq->limit; return 0; -} - -static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) -{ - struct rxe_srq *srq = to_rsrq(ibsrq); - rxe_cleanup(srq); - return 0; +err_out: + rxe_err_srq(srq, "returned err = %d", err); + return err; } static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, @@ -378,76 +503,116 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, spin_unlock_irqrestore(&srq->rq.producer_lock, flags); - if (err) + if (err) { *bad_wr = wr; + rxe_err_srq(srq, "returned err = %d", err); + } return err; } +static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) +{ + struct rxe_srq *srq = to_rsrq(ibsrq); + int err; + + err = rxe_cleanup(srq); + if (err) + rxe_err_srq(srq, "cleanup failed, err = %d", err); + + return 0; +} + +/* qp */ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init, struct ib_udata *udata) { - int err; struct rxe_dev *rxe = to_rdev(ibqp->device); struct rxe_pd *pd = to_rpd(ibqp->pd); struct rxe_qp *qp = to_rqp(ibqp); struct rxe_create_qp_resp __user *uresp = NULL; + int err, cleanup_err; if (udata) { - if (udata->outlen < sizeof(*uresp)) - return -EINVAL; - uresp = udata->outbuf; - } - - if (init->create_flags) - return -EOPNOTSUPP; - - err = rxe_qp_chk_init(rxe, init); - if (err) - return err; + if (udata->inlen) { + err = -EINVAL; + rxe_dbg_dev(rxe, "malformed udata, err = %d", err); + goto err_out; + } - if (udata) { - if (udata->inlen) - return -EINVAL; + if (udata->outlen < sizeof(*uresp)) { + err = -EINVAL; + rxe_dbg_dev(rxe, "malformed udata, err = %d", err); + goto err_out; + } qp->is_user = true; + uresp = udata->outbuf; } else { qp->is_user = false; } + if (init->create_flags) { + err = -EOPNOTSUPP; + rxe_dbg_dev(rxe, "unsupported create_flags, err = %d", err); + goto err_out; + } + + err = rxe_qp_chk_init(rxe, init); + if (err) { + rxe_dbg_dev(rxe, "bad init attr, err = %d", err); + goto err_out; + } + err = rxe_add_to_pool(&rxe->qp_pool, qp); - if (err) - return err; + if (err) { + rxe_dbg_dev(rxe, "unable to create qp, err = %d", err); + goto err_out; + } err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata); - if (err) - goto qp_init; + if (err) { + rxe_dbg_qp(qp, "create qp failed, err = %d", err); + goto err_cleanup; + } rxe_finalize(qp); return 0; -qp_init: - rxe_cleanup(qp); +err_cleanup: + cleanup_err = rxe_cleanup(qp); + if (cleanup_err) + 
rxe_err_qp(qp, "cleanup failed, err = %d", cleanup_err); +err_out: + rxe_err_dev(rxe, "returned err = %d", err); return err; } static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask, struct ib_udata *udata) { - int err; struct rxe_dev *rxe = to_rdev(ibqp->device); struct rxe_qp *qp = to_rqp(ibqp); + int err; - if (mask & ~IB_QP_ATTR_STANDARD_BITS) - return -EOPNOTSUPP; + if (mask & ~IB_QP_ATTR_STANDARD_BITS) { + err = -EOPNOTSUPP; + rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d", + mask, err); + goto err_out; + } err = rxe_qp_chk_attr(rxe, qp, attr, mask); - if (err) - return err; + if (err) { + rxe_dbg_qp(qp, "bad mask/attr, err = %d", err); + goto err_out; + } - err = rxe_qp_from_attr(qp, attr, mask, udata); - if (err) - return err; + err = rxe_qp_from_attr(qp, attr, mask, udata); + if (err) { + rxe_dbg_qp(qp, "modify qp failed, err = %d", err); + goto err_out; + } if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH)) qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label, @@ -455,6 +620,10 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, qp->attr.dest_qp_num); return 0; + +err_out: + rxe_err_qp(qp, "returned err = %d", err); + return err; } static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, @@ -471,41 +640,90 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { struct rxe_qp *qp = to_rqp(ibqp); - int ret; + int err; - ret = rxe_qp_chk_destroy(qp); - if (ret) - return ret; + err = rxe_qp_chk_destroy(qp); + if (err) { + rxe_dbg_qp(qp, "unable to destroy qp, err = %d", err); + goto err_out; + } + + err = rxe_cleanup(qp); + if (err) + rxe_err_qp(qp, "cleanup failed, err = %d", err); - rxe_cleanup(qp); return 0; + +err_out: + rxe_err_qp(qp, "returned err = %d", err); + return err; } +/* send wr */ + +/* sanity check incoming send work request */ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr, - unsigned int mask, unsigned int length) + unsigned int *maskp, unsigned int *lengthp) { int num_sge = ibwr->num_sge; struct rxe_sq *sq = &qp->sq; + unsigned int mask = 0; + unsigned long length = 0; + int err = -EINVAL; + int i; - if (unlikely(num_sge > sq->max_sge)) - return -EINVAL; + do { + mask = wr_opcode_mask(ibwr->opcode, qp); + if (!mask) { + rxe_err_qp(qp, "bad wr opcode for qp type"); + break; + } - if (unlikely(mask & WR_ATOMIC_MASK)) { - if (length < 8) - return -EINVAL; + if (num_sge > sq->max_sge) { + rxe_err_qp(qp, "num_sge > max_sge"); + break; + } - if (atomic_wr(ibwr)->remote_addr & 0x7) - return -EINVAL; - } + length = 0; + for (i = 0; i < ibwr->num_sge; i++) + length += ibwr->sg_list[i].length; - if (unlikely((ibwr->send_flags & IB_SEND_INLINE) && - (length > sq->max_inline))) - return -EINVAL; + if (length > (1UL << 31)) { + rxe_err_qp(qp, "message length too long"); + break; + } - return 0; + if (mask & WR_ATOMIC_MASK) { + if (length != 8) { + rxe_err_qp(qp, "atomic length != 8"); + break; + } + if (atomic_wr(ibwr)->remote_addr & 0x7) { + rxe_err_qp(qp, "misaligned atomic address"); + break; + } + } + if (ibwr->send_flags & IB_SEND_INLINE) { + if (!(mask & WR_INLINE_MASK)) { + rxe_err_qp(qp, "opcode doesn't support inline data"); + break; + } + if (length > sq->max_inline) { + rxe_err_qp(qp, "inline length too big"); + break; + } + } + + err = 0; + } while (0); + + *maskp = mask; + *lengthp = (int)length; + + return err; } -static void init_send_wr(struct rxe_qp *qp, struct 
rxe_send_wr *wr, +static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, const struct ib_send_wr *ibwr) { wr->wr_id = ibwr->wr_id; @@ -521,8 +739,18 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, wr->wr.ud.ah_num = to_rah(ibah)->ah_num; if (qp_type(qp) == IB_QPT_GSI) wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index; - if (wr->opcode == IB_WR_SEND_WITH_IMM) + + switch (wr->opcode) { + case IB_WR_SEND_WITH_IMM: wr->ex.imm_data = ibwr->ex.imm_data; + break; + case IB_WR_SEND: + break; + default: + rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP", + wr->opcode); + return -EINVAL; + } } else { switch (wr->opcode) { case IB_WR_RDMA_WRITE_WITH_IMM: @@ -539,6 +767,11 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, case IB_WR_SEND_WITH_INV: wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey; break; + case IB_WR_RDMA_READ_WITH_INV: + wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey; + wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr; + wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey; + break; case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: wr->wr.atomic.remote_addr = @@ -550,16 +783,26 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, break; case IB_WR_LOCAL_INV: wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey; - break; + break; case IB_WR_REG_MR: wr->wr.reg.mr = reg_wr(ibwr)->mr; wr->wr.reg.key = reg_wr(ibwr)->key; wr->wr.reg.access = reg_wr(ibwr)->access; - break; + break; + case IB_WR_SEND: + case IB_WR_BIND_MW: + case IB_WR_FLUSH: + case IB_WR_ATOMIC_WRITE: + break; default: + rxe_err_qp(qp, "unsupported wr opcode %d", + wr->opcode); + return -EINVAL; break; } } + + return 0; } static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe, @@ -570,24 +813,27 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe, int i; for (i = 0; i < ibwr->num_sge; i++, sge++) { - memcpy(p, (void *)(uintptr_t)sge->addr, sge->length); + memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length); p += sge->length; } } -static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr, +static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr, unsigned int mask, unsigned int length, struct rxe_send_wqe *wqe) { int num_sge = ibwr->num_sge; + int err; - init_send_wr(qp, &wqe->wr, ibwr); + err = init_send_wr(qp, &wqe->wr, ibwr); + if (err) + return err; /* local operation */ if (unlikely(mask & WR_LOCAL_OP_MASK)) { wqe->mask = mask; wqe->state = wqe_state_posted; - return; + return 0; } if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) @@ -606,82 +852,62 @@ static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr, wqe->dma.sge_offset = 0; wqe->state = wqe_state_posted; wqe->ssn = atomic_add_return(1, &qp->ssn); + + return 0; } -static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr, - unsigned int mask, u32 length) +static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr) { int err; struct rxe_sq *sq = &qp->sq; struct rxe_send_wqe *send_wqe; - unsigned long flags; + unsigned int mask; + unsigned int length; int full; - err = validate_send_wr(qp, ibwr, mask, length); + err = validate_send_wr(qp, ibwr, &mask, &length); if (err) return err; - spin_lock_irqsave(&qp->sq.sq_lock, flags); - full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP); - if (unlikely(full)) { - spin_unlock_irqrestore(&qp->sq.sq_lock, flags); + rxe_err_qp(qp, "send queue full"); return -ENOMEM; } send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_FROM_ULP); - init_send_wqe(qp, 
ibwr, mask, length, send_wqe); - - queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP); - - spin_unlock_irqrestore(&qp->sq.sq_lock, flags); + err = init_send_wqe(qp, ibwr, mask, length, send_wqe); + if (!err) + queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP); - return 0; + return err; } -static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, +static int rxe_post_send_kernel(struct rxe_qp *qp, + const struct ib_send_wr *ibwr, const struct ib_send_wr **bad_wr) { int err = 0; - unsigned int mask; - unsigned int length = 0; - int i; - struct ib_send_wr *next; - - while (wr) { - mask = wr_opcode_mask(wr->opcode, qp); - if (unlikely(!mask)) { - err = -EINVAL; - *bad_wr = wr; - break; - } - - if (unlikely((wr->send_flags & IB_SEND_INLINE) && - !(mask & WR_INLINE_MASK))) { - err = -EINVAL; - *bad_wr = wr; - break; - } - - next = wr->next; - - length = 0; - for (i = 0; i < wr->num_sge; i++) - length += wr->sg_list[i].length; - - err = post_one_send(qp, wr, mask, length); + unsigned long flags; + spin_lock_irqsave(&qp->sq.sq_lock, flags); + while (ibwr) { + err = post_one_send(qp, ibwr); if (err) { - *bad_wr = wr; + *bad_wr = ibwr; break; } - wr = next; + ibwr = ibwr->next; } + spin_unlock_irqrestore(&qp->sq.sq_lock, flags); - rxe_sched_task(&qp->req.task); - if (unlikely(qp->req.state == QP_STATE_ERROR)) + if (!err) + rxe_sched_task(&qp->req.task); + + spin_lock_bh(&qp->state_lock); + if (qp_state(qp) == IB_QPS_ERR) rxe_sched_task(&qp->comp.task); + spin_unlock_bh(&qp->state_lock); return err; } @@ -690,23 +916,88 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { struct rxe_qp *qp = to_rqp(ibqp); + int err; - if (unlikely(!qp->valid)) { - *bad_wr = wr; + spin_lock_bh(&qp->state_lock); + /* caller has already called destroy_qp */ + if (WARN_ON_ONCE(!qp->valid)) { + spin_unlock_bh(&qp->state_lock); + rxe_err_qp(qp, "qp has been destroyed"); return -EINVAL; } - if (unlikely(qp->req.state < QP_STATE_READY)) { + if (unlikely(qp_state(qp) < IB_QPS_RTS)) { + spin_unlock_bh(&qp->state_lock); *bad_wr = wr; + rxe_err_qp(qp, "qp not ready to send"); return -EINVAL; } + spin_unlock_bh(&qp->state_lock); if (qp->is_user) { /* Utilize process context to do protocol processing */ rxe_run_task(&qp->req.task); - return 0; - } else - return rxe_post_send_kernel(qp, wr, bad_wr); + } else { + err = rxe_post_send_kernel(qp, wr, bad_wr); + if (err) + return err; + } + + return 0; +} + +/* recv wr */ +static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) +{ + int i; + unsigned long length; + struct rxe_recv_wqe *recv_wqe; + int num_sge = ibwr->num_sge; + int full; + int err; + + full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP); + if (unlikely(full)) { + err = -ENOMEM; + rxe_dbg("queue full"); + goto err_out; + } + + if (unlikely(num_sge > rq->max_sge)) { + err = -EINVAL; + rxe_dbg("bad num_sge > max_sge"); + goto err_out; + } + + length = 0; + for (i = 0; i < num_sge; i++) + length += ibwr->sg_list[i].length; + + /* IBA max message size is 2^31 */ + if (length >= (1UL<<31)) { + err = -EINVAL; + rxe_dbg("message length too long"); + goto err_out; + } + + recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP); + + recv_wqe->wr_id = ibwr->wr_id; + recv_wqe->dma.length = length; + recv_wqe->dma.resid = length; + recv_wqe->dma.num_sge = num_sge; + recv_wqe->dma.cur_sge = 0; + recv_wqe->dma.sge_offset = 0; + memcpy(recv_wqe->dma.sge, ibwr->sg_list, + num_sge * sizeof(struct ib_sge)); + + 
queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP); + + return 0; + +err_out: + rxe_dbg("returned err = %d", err); + return err; } static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, @@ -717,13 +1008,26 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, struct rxe_rq *rq = &qp->rq; unsigned long flags; - if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) { + spin_lock_bh(&qp->state_lock); + /* caller has already called destroy_qp */ + if (WARN_ON_ONCE(!qp->valid)) { + spin_unlock_bh(&qp->state_lock); + rxe_err_qp(qp, "qp has been destroyed"); + return -EINVAL; + } + + /* see C10-97.2.1 */ + if (unlikely((qp_state(qp) < IB_QPS_INIT))) { + spin_unlock_bh(&qp->state_lock); *bad_wr = wr; + rxe_dbg_qp(qp, "qp not ready to post recv"); return -EINVAL; } + spin_unlock_bh(&qp->state_lock); if (unlikely(qp->srq)) { *bad_wr = wr; + rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead"); return -EINVAL; } @@ -740,76 +1044,102 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, spin_unlock_irqrestore(&rq->producer_lock, flags); - if (qp->resp.state == QP_STATE_ERROR) + spin_lock_bh(&qp->state_lock); + if (qp_state(qp) == IB_QPS_ERR) rxe_sched_task(&qp->resp.task); + spin_unlock_bh(&qp->state_lock); return err; } +/* cq */ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { - int err; struct ib_device *dev = ibcq->device; struct rxe_dev *rxe = to_rdev(dev); struct rxe_cq *cq = to_rcq(ibcq); struct rxe_create_cq_resp __user *uresp = NULL; + int err, cleanup_err; if (udata) { - if (udata->outlen < sizeof(*uresp)) - return -EINVAL; + if (udata->outlen < sizeof(*uresp)) { + err = -EINVAL; + rxe_dbg_dev(rxe, "malformed udata, err = %d", err); + goto err_out; + } uresp = udata->outbuf; } - if (attr->flags) - return -EOPNOTSUPP; + if (attr->flags) { + err = -EOPNOTSUPP; + rxe_dbg_dev(rxe, "bad attr->flags, err = %d", err); + goto err_out; + } err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector); - if (err) - return err; + if (err) { + rxe_dbg_dev(rxe, "bad init attributes, err = %d", err); + goto err_out; + } + + err = rxe_add_to_pool(&rxe->cq_pool, cq); + if (err) { + rxe_dbg_dev(rxe, "unable to create cq, err = %d", err); + goto err_out; + } err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata, uresp); - if (err) - return err; - - return rxe_add_to_pool(&rxe->cq_pool, cq); -} - -static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) -{ - struct rxe_cq *cq = to_rcq(ibcq); - - /* See IBA C11-17: The CI shall return an error if this Verb is - * invoked while a Work Queue is still associated with the CQ. 
- */ - if (atomic_read(&cq->num_wq)) - return -EINVAL; - - rxe_cq_disable(cq); + if (err) { + rxe_dbg_cq(cq, "create cq failed, err = %d", err); + goto err_cleanup; + } - rxe_cleanup(cq); return 0; + +err_cleanup: + cleanup_err = rxe_cleanup(cq); + if (cleanup_err) + rxe_err_cq(cq, "cleanup failed, err = %d", cleanup_err); +err_out: + rxe_err_dev(rxe, "returned err = %d", err); + return err; } static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) { - int err; struct rxe_cq *cq = to_rcq(ibcq); struct rxe_dev *rxe = to_rdev(ibcq->device); struct rxe_resize_cq_resp __user *uresp = NULL; + int err; if (udata) { - if (udata->outlen < sizeof(*uresp)) - return -EINVAL; + if (udata->outlen < sizeof(*uresp)) { + err = -EINVAL; + rxe_dbg_cq(cq, "malformed udata"); + goto err_out; + } uresp = udata->outbuf; } err = rxe_cq_chk_attr(rxe, cq, cqe, 0); - if (err) - return err; + if (err) { + rxe_dbg_cq(cq, "bad attr, err = %d", err); + goto err_out; + } + + err = rxe_cq_resize_queue(cq, cqe, uresp, udata); + if (err) { + rxe_dbg_cq(cq, "resize cq failed, err = %d", err); + goto err_out; + } + + return 0; - return rxe_cq_resize_queue(cq, cqe, uresp, udata); +err_out: + rxe_err_cq(cq, "returned err = %d", err); + return err; } static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) @@ -823,7 +1153,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) for (i = 0; i < num_entries; i++) { cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP); if (!cqe) - break; + break; /* queue empty */ memcpy(wc++, &cqe->ibwc, sizeof(*wc)); queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP); @@ -864,6 +1194,32 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) return ret; } +static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) +{ + struct rxe_cq *cq = to_rcq(ibcq); + int err; + + /* See IBA C11-17: The CI shall return an error if this Verb is + * invoked while a Work Queue is still associated with the CQ. 
+ */ + if (atomic_read(&cq->num_wq)) { + err = -EINVAL; + rxe_dbg_cq(cq, "still in use"); + goto err_out; + } + + err = rxe_cleanup(cq); + if (err) + rxe_err_cq(cq, "cleanup failed, err = %d", err); + + return 0; + +err_out: + rxe_err_cq(cq, "returned err = %d", err); + return err; +} + +/* mr */ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access) { struct rxe_dev *rxe = to_rdev(ibpd->device); @@ -872,14 +1228,14 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access) int err; mr = kzalloc(sizeof(*mr), GFP_KERNEL); - if (!mr) { - err = -ENOMEM; - goto err_out; - } + if (!mr) + return ERR_PTR(-ENOMEM); err = rxe_add_to_pool(&rxe->mr_pool, mr); - if (err) + if (err) { + rxe_dbg_dev(rxe, "unable to create mr"); goto err_free; + } rxe_get(pd); mr->ibmr.pd = ibpd; @@ -891,47 +1247,49 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access) err_free: kfree(mr); -err_out: + rxe_err_pd(pd, "returned err = %d", err); return ERR_PTR(err); } -static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, - u64 start, - u64 length, - u64 iova, - int access, struct ib_udata *udata) +static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start, + u64 length, u64 iova, int access, + struct ib_udata *udata) { - int err; struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_pd *pd = to_rpd(ibpd); struct rxe_mr *mr; + int err, cleanup_err; mr = kzalloc(sizeof(*mr), GFP_KERNEL); - if (!mr) { - err = -ENOMEM; - goto err_out; - } + if (!mr) + return ERR_PTR(-ENOMEM); err = rxe_add_to_pool(&rxe->mr_pool, mr); - if (err) + if (err) { + rxe_dbg_pd(pd, "unable to create mr"); goto err_free; + } rxe_get(pd); mr->ibmr.pd = ibpd; mr->ibmr.device = ibpd->device; err = rxe_mr_init_user(rxe, start, length, iova, access, mr); - if (err) + if (err) { + rxe_dbg_mr(mr, "reg_user_mr failed, err = %d", err); goto err_cleanup; + } rxe_finalize(mr); return &mr->ibmr; err_cleanup: - rxe_cleanup(mr); + cleanup_err = rxe_cleanup(mr); + if (cleanup_err) + rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err); err_free: kfree(mr); -err_out: + rxe_err_pd(pd, "returned err = %d", err); return ERR_PTR(err); } @@ -941,17 +1299,19 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_pd *pd = to_rpd(ibpd); struct rxe_mr *mr; - int err; + int err, cleanup_err; - if (mr_type != IB_MR_TYPE_MEM_REG) - return ERR_PTR(-EINVAL); - - mr = kzalloc(sizeof(*mr), GFP_KERNEL); - if (!mr) { - err = -ENOMEM; + if (mr_type != IB_MR_TYPE_MEM_REG) { + err = -EINVAL; + rxe_dbg_pd(pd, "mr type %d not supported, err = %d", + mr_type, err); goto err_out; } + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + err = rxe_add_to_pool(&rxe->mr_pool, mr); if (err) goto err_free; @@ -961,20 +1321,49 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, mr->ibmr.device = ibpd->device; err = rxe_mr_init_fast(max_num_sg, mr); - if (err) + if (err) { + rxe_dbg_mr(mr, "alloc_mr failed, err = %d", err); goto err_cleanup; + } rxe_finalize(mr); return &mr->ibmr; err_cleanup: - rxe_cleanup(mr); + cleanup_err = rxe_cleanup(mr); + if (cleanup_err) + rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err); err_free: kfree(mr); err_out: + rxe_err_pd(pd, "returned err = %d", err); return ERR_PTR(err); } +static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) +{ + struct rxe_mr *mr = to_rmr(ibmr); + int err, cleanup_err; + + /* See IBA 10.6.7.2.6 */ + if (atomic_read(&mr->num_mw) > 0) {
err = -EINVAL; + rxe_dbg_mr(mr, "mr has mw's bound"); + goto err_out; + } + + cleanup_err = rxe_cleanup(mr); + if (cleanup_err) + rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err); + + kfree_rcu(mr); + return 0; + +err_out: + rxe_err_mr(mr, "returned err = %d", err); + return err; +} + static ssize_t parent_show(struct device *device, struct device_attribute *attr, char *buf) { @@ -1095,7 +1484,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name) err = ib_register_device(dev, ibdev_name, NULL); if (err) - rxe_dbg(rxe, "failed with error %d\n", err); + rxe_dbg_dev(rxe, "failed with error %d\n", err); /* * Note that rxe may be invalid at this point if another thread diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index c269ae2a322432..26a20f08869287 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -63,9 +63,7 @@ struct rxe_cq { struct rxe_queue *queue; spinlock_t cq_lock; u8 notify; - bool is_dying; bool is_user; - struct tasklet_struct comp_task; atomic_t num_wq; }; @@ -104,17 +102,7 @@ struct rxe_srq { int error; }; -enum rxe_qp_state { - QP_STATE_RESET, - QP_STATE_INIT, - QP_STATE_READY, - QP_STATE_DRAIN, /* req only */ - QP_STATE_DRAINED, /* req only */ - QP_STATE_ERROR -}; - struct rxe_req_info { - enum rxe_qp_state state; int wqe_index; u32 psn; int opcode; @@ -129,7 +117,6 @@ struct rxe_req_info { }; struct rxe_comp_info { - enum rxe_qp_state state; u32 psn; int opcode; int timeout; @@ -175,7 +162,6 @@ struct resp_res { }; struct rxe_resp_info { - enum rxe_qp_state state; u32 msn; u32 psn; u32 ack_psn; diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index dacc174604bf2f..65b5cda5457baf 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -437,9 +437,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event, dev_dbg(&netdev->dev, "siw: event %lu\n", event); - if (dev_net(netdev) != &init_net) - return NOTIFY_OK; - base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW); if (!base_dev) return NOTIFY_OK; diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index fd721cc19682eb..58bbf738e4e599 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -139,7 +139,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx, break; bytes = min(bytes, len); - if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) == + if (siw_rx_kva(srx, ib_virt_dma_to_ptr(buf_addr), bytes) == bytes) { copied += bytes; offset += bytes; @@ -487,7 +487,7 @@ int siw_proc_send(struct siw_qp *qp) mem_p = *mem; if (mem_p->mem_obj == NULL) rv = siw_rx_kva(srx, - (void *)(uintptr_t)(sge->laddr + frx->sge_off), + ib_virt_dma_to_ptr(sge->laddr + frx->sge_off), sge_bytes); else if (!mem_p->is_pbl) rv = siw_rx_umem(srx, mem_p->umem, @@ -852,7 +852,7 @@ int siw_proc_rresp(struct siw_qp *qp) if (mem_p->mem_obj == NULL) rv = siw_rx_kva(srx, - (void *)(uintptr_t)(sge->laddr + wqe->processed), + ib_virt_dma_to_ptr(sge->laddr + wqe->processed), bytes); else if (!mem_p->is_pbl) rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed, diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 05052b49107f27..4b292e0504f1a1 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct 
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index dacc174604bf2f..65b5cda5457baf 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -437,9 +437,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
 
 	dev_dbg(&netdev->dev, "siw: event %lu\n", event);
 
-	if (dev_net(netdev) != &init_net)
-		return NOTIFY_OK;
-
 	base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW);
 	if (!base_dev)
 		return NOTIFY_OK;
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index fd721cc19682eb..58bbf738e4e599 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -139,7 +139,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
 			break;
 
 		bytes = min(bytes, len);
-		if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
+		if (siw_rx_kva(srx, ib_virt_dma_to_ptr(buf_addr), bytes) ==
 		    bytes) {
 			copied += bytes;
 			offset += bytes;
@@ -487,7 +487,7 @@ int siw_proc_send(struct siw_qp *qp)
 		mem_p = *mem;
 		if (mem_p->mem_obj == NULL)
 			rv = siw_rx_kva(srx,
-				(void *)(uintptr_t)(sge->laddr + frx->sge_off),
+				ib_virt_dma_to_ptr(sge->laddr + frx->sge_off),
 				sge_bytes);
 		else if (!mem_p->is_pbl)
 			rv = siw_rx_umem(srx, mem_p->umem,
@@ -852,7 +852,7 @@ int siw_proc_rresp(struct siw_qp *qp)
 
 	if (mem_p->mem_obj == NULL)
 		rv = siw_rx_kva(srx,
-			(void *)(uintptr_t)(sge->laddr + wqe->processed),
+			ib_virt_dma_to_ptr(sge->laddr + wqe->processed),
 			bytes);
 	else if (!mem_p->is_pbl)
 		rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 05052b49107f27..4b292e0504f1a1 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
 	dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
 
 	if (paddr)
-		return virt_to_page((void *)(uintptr_t)paddr);
+		return ib_virt_dma_to_page(paddr);
 
 	return NULL;
 }
@@ -56,8 +56,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
 
 	if (!mem->mem_obj) {
 		/* Kernel client using kva */
-		memcpy(paddr,
-		       (const void *)(uintptr_t)sge->laddr, bytes);
+		memcpy(paddr, ib_virt_dma_to_ptr(sge->laddr), bytes);
 	} else if (c_tx->in_syscall) {
 		if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
 				   bytes))
@@ -477,7 +476,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				 * or memory region with assigned kernel buffer
 				 */
 				iov[seg].iov_base =
-					(void *)(uintptr_t)(sge->laddr + sge_off);
+					ib_virt_dma_to_ptr(sge->laddr + sge_off);
 				iov[seg].iov_len = sge_len;
 
 				if (do_crc)
@@ -537,19 +536,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 					 * Cast to an uintptr_t to preserve all 64 bits
 					 * in sge->laddr.
 					 */
-					uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
+					u64 va = sge->laddr + sge_off;
 
-					/*
-					 * virt_to_page() takes a (void *) pointer
-					 * so cast to a (void *) meaning it will be 64
-					 * bits on a 64 bit platform and 32 bits on a
-					 * 32 bit platform.
-					 */
-					page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
+					page_array[seg] = ib_virt_dma_to_page(va);
 					if (do_crc)
 						crypto_shash_update(
 							c_tx->mpa_crc_hd,
-							(void *)va,
+							ib_virt_dma_to_ptr(va),
 							plen);
 				}
 
@@ -558,7 +551,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 			data_len -= plen;
 			fp_off = 0;
 
-			if (++seg > (int)MAX_ARRAY) {
+			if (++seg >= (int)MAX_ARRAY) {
 				siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
 				siw_unmap_pages(iov, kmap_mask, seg-1);
 				wqe->processed -= c_tx->bytes_unsent;
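
The final siw_qp_tx.c hunk is an off-by-one fix: page_array holds MAX_ARRAY entries, so once the post-incremented seg equals MAX_ARRAY, the next pass would write one slot past the end; only '>=' stops in time. A self-contained sketch of the bounds logic (array size and names are illustrative, not from the driver):

    #include <stdio.h>

    #define MAX_ARRAY 4

    int main(void)
    {
            int used[MAX_ARRAY];
            int seg = 0;

            for (int i = 0; i < 10; i++) {
                    used[seg] = i;            /* valid while seg <= MAX_ARRAY - 1 */
                    if (++seg >= MAX_ARRAY) { /* with '>', used[MAX_ARRAY] would be written next */
                            printf("stopping at %d segments\n", seg);
                            break;
                    }
            }
            return 0;
    }
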
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 906fde1a2a0de2..398ec13db62481 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -660,7 +660,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
 			bytes = -EINVAL;
 			break;
 		}
-		memcpy(kbuf, (void *)(uintptr_t)core_sge->addr,
+		memcpy(kbuf, ib_virt_dma_to_ptr(core_sge->addr),
 		       core_sge->length);
 
 		kbuf += core_sge->length;
@@ -1523,7 +1523,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
 		}
 		siw_dbg_mem(mem, "sge[%d], size %u, addr 0x%p, total %lu\n",
-			    i, pble->size, (void *)(uintptr_t)pble->addr,
+			    i, pble->size, ib_virt_dma_to_ptr(pble->addr),
 			    pbl_size);
 	}
 	rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 7b83f48f60c5ea..39ea73f690168c 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -141,10 +141,14 @@ static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
 
 /* creates a new tx descriptor and adds header regd buffer */
 static void iser_create_send_desc(struct iser_conn *iser_conn,
-				  struct iser_tx_desc *tx_desc)
+		struct iser_tx_desc *tx_desc, enum iser_desc_type type,
+		void (*done)(struct ib_cq *cq, struct ib_wc *wc))
 {
 	struct iser_device *device = iser_conn->ib_conn.device;
 
+	tx_desc->type = type;
+	tx_desc->cqe.done = done;
+
 	ib_dma_sync_single_for_cpu(device->ib_device,
 		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
 
@@ -349,9 +353,8 @@ int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
 	edtl = ntohl(hdr->data_length);
 
 	/* build the tx desc regd header and add it to the tx desc dto */
-	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
-	tx_desc->cqe.done = iser_cmd_comp;
-	iser_create_send_desc(iser_conn, tx_desc);
+	iser_create_send_desc(iser_conn, tx_desc, ISCSI_TX_SCSI_COMMAND,
+			      iser_cmd_comp);
 
 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
 		data_buf = &iser_task->data[ISER_DIR_IN];
@@ -457,7 +460,6 @@ int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
 	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
 		 itt, buf_offset, data_seg_len);
 
-
 	err = iser_post_send(&iser_conn->ib_conn, tx_desc);
 	if (!err)
 		return 0;
@@ -478,9 +480,8 @@ int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
 	struct iser_device *device;
 
 	/* build the tx desc regd header and add it to the tx desc dto */
-	mdesc->type = ISCSI_TX_CONTROL;
-	mdesc->cqe.done = iser_ctrl_comp;
-	iser_create_send_desc(iser_conn, mdesc);
+	iser_create_send_desc(iser_conn, mdesc, ISCSI_TX_CONTROL,
+			      iser_ctrl_comp);
 
 	device = iser_conn->ib_conn.device;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 1b8eda0dae4e0c..95b8eebf7e045f 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -37,12 +37,6 @@
 
 #include "iscsi_iser.h"
 
-#define ISCSI_ISER_MAX_CONN	8
-#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
-#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
-#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
-				 ISCSI_ISER_MAX_CONN)
-
 static void iser_qp_event_callback(struct ib_event *cause, void *context)
 {
 	iser_err("qp event %s (%d)\n",
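
The iser_initiator.c change is a pure consolidation: both call sites used to set the descriptor type and completion callback by hand before calling iser_create_send_desc(), and now pass them as arguments so the assignments live in one place. The shape of the refactor, reduced to a standalone sketch with stand-in names:

    #include <stdio.h>

    enum kind { KIND_CMD, KIND_CTRL };          /* stand-in for iser_desc_type */
    struct desc { enum kind type; void (*done)(void); };

    static void cmd_done(void) { puts("command completed"); }

    /* After the refactor the initializer owns both fields, so a new
     * call site cannot set one and forget the other. */
    static void create_desc(struct desc *d, enum kind type, void (*done)(void))
    {
            d->type = type;
            d->done = done;
    }

    int main(void)
    {
            struct desc d;

            create_desc(&d, KIND_CMD, cmd_done);
            d.done();
            return 0;
    }
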
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 3e9343fd2d8e43..c12005eab14c19 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -549,6 +549,7 @@ static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
  */
 static int srpt_refresh_port(struct srpt_port *sport)
 {
+	struct ib_mad_agent *mad_agent;
 	struct ib_mad_reg_req reg_req;
 	struct ib_port_modify port_modify;
 	struct ib_port_attr port_attr;
@@ -593,24 +594,26 @@ static int srpt_refresh_port(struct srpt_port *sport)
 		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
 
-		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
-							 sport->port,
-							 IB_QPT_GSI,
-							 &reg_req, 0,
-							 srpt_mad_send_handler,
-							 srpt_mad_recv_handler,
-							 sport, 0);
-		if (IS_ERR(sport->mad_agent)) {
+		mad_agent = ib_register_mad_agent(sport->sdev->device,
+						  sport->port,
+						  IB_QPT_GSI,
+						  &reg_req, 0,
+						  srpt_mad_send_handler,
+						  srpt_mad_recv_handler,
+						  sport, 0);
+		if (IS_ERR(mad_agent)) {
 			pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
 			       dev_name(&sport->sdev->device->dev), sport->port,
-			       PTR_ERR(sport->mad_agent));
+			       PTR_ERR(mad_agent));
 			sport->mad_agent = NULL;
 			memset(&port_modify, 0, sizeof(port_modify));
 			port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
 			ib_modify_port(sport->sdev->device, sport->port, 0,
 				       &port_modify);
-
+			return 0;
 		}
+
+		sport->mad_agent = mad_agent;
 	}
 
 	return 0;
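
The ib_srpt.c fix applies the usual publish-after-validate pattern: register into a local variable, and only store into the shared sport->mad_agent once the value is known to be valid, so no other context can ever read an ERR_PTR through it. A generic sketch of the pattern (the types and functions here are stand-ins, not from srpt):

    #include <linux/err.h>

    struct resource;                            /* opaque stand-in */
    struct holder { struct resource *res; };
    struct resource *acquire_resource(void);    /* may return ERR_PTR(-E...) */

    static int attach_resource(struct holder *h)
    {
            struct resource *res = acquire_resource();

            if (IS_ERR(res)) {
                    h->res = NULL;              /* never publish the error pointer */
                    return PTR_ERR(res);
            }
            h->res = res;                       /* publish only a valid pointer */
            return 0;
    }
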
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index a5408879e077e9..b31de4cf6534bd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -10321,6 +10321,87 @@ struct hwrm_selftest_irq_output {
 	u8	valid;
 };
 
+/* dbc_dbc (size:64b/8B) */
+struct dbc_dbc {
+	u32	index;
+	#define DBC_DBC_INDEX_MASK		0xffffffUL
+	#define DBC_DBC_INDEX_SFT		0
+	#define DBC_DBC_EPOCH			0x1000000UL
+	#define DBC_DBC_TOGGLE_MASK		0x6000000UL
+	#define DBC_DBC_TOGGLE_SFT		25
+	u32	type_path_xid;
+	#define DBC_DBC_XID_MASK		0xfffffUL
+	#define DBC_DBC_XID_SFT			0
+	#define DBC_DBC_PATH_MASK		0x3000000UL
+	#define DBC_DBC_PATH_SFT		24
+	#define DBC_DBC_PATH_ROCE		(0x0UL << 24)
+	#define DBC_DBC_PATH_L2			(0x1UL << 24)
+	#define DBC_DBC_PATH_ENGINE		(0x2UL << 24)
+	#define DBC_DBC_PATH_LAST		DBC_DBC_PATH_ENGINE
+	#define DBC_DBC_VALID			0x4000000UL
+	#define DBC_DBC_DEBUG_TRACE		0x8000000UL
+	#define DBC_DBC_TYPE_MASK		0xf0000000UL
+	#define DBC_DBC_TYPE_SFT		28
+	#define DBC_DBC_TYPE_SQ			(0x0UL << 28)
+	#define DBC_DBC_TYPE_RQ			(0x1UL << 28)
+	#define DBC_DBC_TYPE_SRQ		(0x2UL << 28)
+	#define DBC_DBC_TYPE_SRQ_ARM		(0x3UL << 28)
+	#define DBC_DBC_TYPE_CQ			(0x4UL << 28)
+	#define DBC_DBC_TYPE_CQ_ARMSE		(0x5UL << 28)
+	#define DBC_DBC_TYPE_CQ_ARMALL		(0x6UL << 28)
+	#define DBC_DBC_TYPE_CQ_ARMENA		(0x7UL << 28)
+	#define DBC_DBC_TYPE_SRQ_ARMENA		(0x8UL << 28)
+	#define DBC_DBC_TYPE_CQ_CUTOFF_ACK	(0x9UL << 28)
+	#define DBC_DBC_TYPE_NQ		(0xaUL << 28)
+	#define DBC_DBC_TYPE_NQ_ARM		(0xbUL << 28)
+	#define DBC_DBC_TYPE_NQ_MASK		(0xeUL << 28)
+	#define DBC_DBC_TYPE_NULL		(0xfUL << 28)
+	#define DBC_DBC_TYPE_LAST		DBC_DBC_TYPE_NULL
+};
+
+/* db_push_start (size:64b/8B) */
+struct db_push_start {
+	u64	db;
+	#define DB_PUSH_START_DB_INDEX_MASK	0xffffffUL
+	#define DB_PUSH_START_DB_INDEX_SFT	0
+	#define DB_PUSH_START_DB_PI_LO_MASK	0xff000000UL
+	#define DB_PUSH_START_DB_PI_LO_SFT	24
+	#define DB_PUSH_START_DB_XID_MASK	0xfffff00000000ULL
+	#define DB_PUSH_START_DB_XID_SFT	32
+	#define DB_PUSH_START_DB_PI_HI_MASK	0xf0000000000000ULL
+	#define DB_PUSH_START_DB_PI_HI_SFT	52
+	#define DB_PUSH_START_DB_TYPE_MASK	0xf000000000000000ULL
+	#define DB_PUSH_START_DB_TYPE_SFT	60
+	#define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60)
+	#define DB_PUSH_START_DB_TYPE_PUSH_END	(0xdULL << 60)
+	#define DB_PUSH_START_DB_TYPE_LAST	DB_PUSH_START_DB_TYPE_PUSH_END
+};
+
+/* db_push_end (size:64b/8B) */
+struct db_push_end {
+	u64	db;
+	#define DB_PUSH_END_DB_INDEX_MASK	0xffffffUL
+	#define DB_PUSH_END_DB_INDEX_SFT	0
+	#define DB_PUSH_END_DB_PI_LO_MASK	0xff000000UL
+	#define DB_PUSH_END_DB_PI_LO_SFT	24
+	#define DB_PUSH_END_DB_XID_MASK		0xfffff00000000ULL
+	#define DB_PUSH_END_DB_XID_SFT		32
+	#define DB_PUSH_END_DB_PI_HI_MASK	0xf0000000000000ULL
+	#define DB_PUSH_END_DB_PI_HI_SFT	52
+	#define DB_PUSH_END_DB_PATH_MASK	0x300000000000000ULL
+	#define DB_PUSH_END_DB_PATH_SFT		56
+	#define DB_PUSH_END_DB_PATH_ROCE	(0x0ULL << 56)
+	#define DB_PUSH_END_DB_PATH_L2		(0x1ULL << 56)
+	#define DB_PUSH_END_DB_PATH_ENGINE	(0x2ULL << 56)
+	#define DB_PUSH_END_DB_PATH_LAST	DB_PUSH_END_DB_PATH_ENGINE
+	#define DB_PUSH_END_DB_DEBUG_TRACE	0x800000000000000ULL
+	#define DB_PUSH_END_DB_TYPE_MASK	0xf000000000000000ULL
+	#define DB_PUSH_END_DB_TYPE_SFT		60
+	#define DB_PUSH_END_DB_TYPE_PUSH_START	(0xcULL << 60)
+	#define DB_PUSH_END_DB_TYPE_PUSH_END	(0xdULL << 60)
+	#define DB_PUSH_END_DB_TYPE_LAST	DB_PUSH_END_DB_TYPE_PUSH_END
+};
+
 /* db_push_info (size:64b/8B) */
 struct db_push_info {
 	u32	push_size_push_index;
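
Per the dbc_dbc layout above, a doorbell is two 32-bit words: the producer index in the low word, and the XID, path, valid bit, and doorbell type in the high word. A hedged sketch of composing a send-queue doorbell value from these masks, assuming the device consumes it as a single little-endian 64-bit write (the compose helper is illustrative, not driver code):

    #include <stdint.h>

    /* Mirrored from the dbc_dbc definitions above. */
    #define DBC_DBC_INDEX_MASK 0xffffffUL
    #define DBC_DBC_XID_MASK   0xfffffUL
    #define DBC_DBC_PATH_ROCE  (0x0UL << 24)
    #define DBC_DBC_VALID      0x4000000UL
    #define DBC_DBC_TYPE_SQ    (0x0UL << 28)

    static uint64_t compose_sq_doorbell(uint32_t prod_idx, uint32_t xid)
    {
            uint32_t index = prod_idx & DBC_DBC_INDEX_MASK;
            uint32_t type_path_xid = (xid & DBC_DBC_XID_MASK) |
                                     DBC_DBC_PATH_ROCE |
                                     DBC_DBC_VALID |
                                     DBC_DBC_TYPE_SQ;

            /* index occupies the low u32, type/path/xid the high u32 */
            return ((uint64_t)type_path_xid << 32) | index;
    }
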
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index ef546ed8b4d93e..9c94807097cb84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -930,8 +930,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
 static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
 	bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
-	bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
-		  MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+	bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
 
 	return ro && lro_en ? MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 4c9a3210600c23..1f90594499c609 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -39,12 +39,13 @@
 
 void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
 {
-	bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
 	bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
-	bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);
+	bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read) ||
+		       (pcie_relaxed_ordering_enabled(mdev->pdev) &&
+			MLX5_CAP_GEN(mdev, relaxed_ordering_read_pci_enabled));
 
-	MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
-	MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
+	MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read);
+	MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write);
 }
 
 int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index edc738e86cac03..995eb2d5ace0b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -691,6 +691,9 @@ static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
 		 MLX5_ST_SZ_BYTES(roce_cap));
 	MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1);
 
+	if (MLX5_CAP_ROCE_MAX(dev, qp_ooo_transmit_default))
+		MLX5_SET(roce_cap, set_hca_cap, qp_ooo_transmit_default, 1);
+
 	err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ROCE);
 	return err;
 }
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index b42696d74c9f02..dc5e2cb302a594 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1100,7 +1100,9 @@ struct mlx5_ifc_roce_cap_bits {
 	u8	   sw_r_roce_src_udp_port[0x1];
 	u8	   fl_rc_qp_when_roce_disabled[0x1];
 	u8	   fl_rc_qp_when_roce_enabled[0x1];
-	u8	   reserved_at_7[0x17];
+	u8	   reserved_at_7[0x1];
+	u8	   qp_ooo_transmit_default[0x1];
+	u8	   reserved_at_9[0x15];
 	u8	   qp_ts_format[0x2];
 
 	u8	   reserved_at_20[0x60];
@@ -1516,7 +1518,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8	   reserved_at_b0[0x1];
 	u8	   uplink_follow[0x1];
 	u8	   ts_cqe_to_dest_cqn[0x1];
-	u8	   reserved_at_b3[0x7];
+	u8	   reserved_at_b3[0x6];
+	u8	   go_back_n[0x1];
 	u8	   shampo[0x1];
 	u8	   reserved_at_bb[0x5];
 
@@ -1531,7 +1534,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8	   log_max_eq_sz[0x8];
 	u8	   relaxed_ordering_write[0x1];
-	u8	   relaxed_ordering_read[0x1];
+	u8	   relaxed_ordering_read_pci_enabled[0x1];
 	u8	   log_max_mkey[0x6];
 	u8	   reserved_at_f0[0x6];
 	u8	   terminate_scatter_list_mkey[0x1];
@@ -1747,9 +1750,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8	   reserved_at_320[0x3];
 	u8	   log_max_transport_domain[0x5];
-	u8	   reserved_at_328[0x3];
+	u8	   reserved_at_328[0x2];
+	u8	   relaxed_ordering_read[0x1];
 	u8	   log_max_pd[0x5];
-	u8	   reserved_at_330[0xb];
+	u8	   reserved_at_330[0x9];
+	u8	   q_counter_aggregation[0x1];
+	u8	   q_counter_other_vport[0x1];
 	u8	   log_max_xrcd[0x5];
 
 	u8	   nic_receive_steering_discard[0x1];
@@ -3291,7 +3297,8 @@ struct mlx5_ifc_qpc_bits {
 	u8	   log_rq_stride[0x3];
 	u8	   no_sq[0x1];
 	u8	   log_sq_size[0x4];
-	u8	   reserved_at_55[0x3];
+	u8	   reserved_at_55[0x1];
+	u8	   retry_mode[0x2];
 	u8	   ts_format[0x2];
 	u8	   reserved_at_5a[0x1];
 	u8	   rlky[0x1];
@@ -5629,10 +5636,15 @@ struct mlx5_ifc_query_q_counter_in_bits {
 	u8	   reserved_at_20[0x10];
 	u8	   op_mod[0x10];
 
-	u8	   reserved_at_40[0x80];
+	u8	   other_vport[0x1];
+	u8	   reserved_at_41[0xf];
+	u8	   vport_number[0x10];
+
+	u8	   reserved_at_60[0x60];
 
 	u8	   clear[0x1];
-	u8	   reserved_at_c1[0x1f];
+	u8	   aggregate[0x1];
+	u8	   reserved_at_c2[0x1e];
 
 	u8	   reserved_at_e0[0x18];
 	u8	   counter_set_id[0x8];
@@ -9315,7 +9327,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
 	u8	   reserved_at_20[0x10];
 	u8	   op_mod[0x10];
 
-	u8	   reserved_at_40[0x38];
+	u8	   reserved_at_40[0x33];
+	u8	   flow_counter_bulk_log_size[0x5];
 	u8	   flow_counter_bulk[0x8];
 };
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 949cf4ffc536c5..1e7774ac808f08 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4035,6 +4035,31 @@ static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
 	return dma_pci_p2pdma_supported(dev->dma_device);
 }
 
+/**
+ * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
+ * @dma_addr: The DMA address
+ *
+ * Used by ib_uses_virt_dma() devices to get back to the kernel pointer after
+ * going through the dma_addr marshalling.
+ */
+static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
+{
+	/* virt_dma mode maps the kvs's directly into the dma addr */
+	return (void *)(uintptr_t)dma_addr;
+}
+
+/**
+ * ib_virt_dma_to_page - Convert a dma_addr to a struct page
+ * @dma_addr: The DMA address
+ *
+ * Used by ib_uses_virt_dma() devices to get back to the struct page after
+ * going through the dma_addr marshalling.
+ */
+static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
+{
+	return virt_to_page(ib_virt_dma_to_ptr(dma_addr));
+}
+
 /**
  * ib_dma_mapping_error - check a DMA addr for error
  * @dev: The device for which the dma_addr was created
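
The two new ib_verbs.h helpers centralize the u64-to-pointer conversion that the siw hunks earlier in this series open-coded as (void *)(uintptr_t) casts. A minimal caller sketch, assuming a device for which ib_uses_virt_dma() is true; 'struct my_sge' is a stand-in for a driver's SGE type, not an existing structure:

    #include <linux/string.h>
    #include <rdma/ib_verbs.h>

    struct my_sge {
            u64 laddr;      /* for virt-DMA devices this is really a kva */
            u32 length;
    };

    static void copy_from_virt_dma_sge(void *dst, const struct my_sge *sge)
    {
            /* No dma_sync or unmap: the "DMA address" is the kernel
             * virtual address itself, so just convert and copy. */
            memcpy(dst, ib_virt_dma_to_ptr(sge->laddr), sge->length);
    }
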
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index b1de99bf56ce6b..c4e90775da0c25 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -96,6 +96,10 @@ struct bnxt_re_cq_resp {
 	__u32 rsvd;
 };
 
+struct bnxt_re_resize_cq_req {
+	__aligned_u64 cq_va;
+};
+
 struct bnxt_re_qp_req {
 	__aligned_u64 qpsva;
 	__aligned_u64 qprva;
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index 163ac79556d680..d94c32f2880432 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
 /*
- * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
  */
 
 #ifndef EFA_ABI_USER_H
@@ -120,6 +120,8 @@ enum {
 	EFA_QUERY_DEVICE_CAPS_RNR_RETRY = 1 << 1,
 	EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS = 1 << 2,
 	EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID = 1 << 3,
+	EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128 = 1 << 4,
+	EFA_QUERY_DEVICE_CAPS_RDMA_WRITE = 1 << 5,
 };
 
 struct efa_ibv_ex_query_device_resp {