Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_ud.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c  382
1 file changed, 131 insertions(+), 251 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index f9a3338..16a2a93 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -36,68 +36,17 @@
 #include "ipath_verbs.h"
 #include "ipath_kernel.h"
 
-static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
-		    u32 *lengthp, struct ipath_sge_state *ss)
-{
-	int user = to_ipd(qp->ibqp.pd)->user;
-	int i, j, ret;
-	struct ib_wc wc;
-
-	*lengthp = 0;
-	for (i = j = 0; i < wqe->num_sge; i++) {
-		if (wqe->sg_list[i].length == 0)
-			continue;
-		/* Check LKEY */
-		if ((user && wqe->sg_list[i].lkey == 0) ||
-		    !ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,
-				   &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
-			goto bad_lkey;
-		*lengthp += wqe->sg_list[i].length;
-		j++;
-	}
-	ss->num_sge = j;
-	ret = 1;
-	goto bail;
-
-bad_lkey:
-	wc.wr_id = wqe->wr_id;
-	wc.status = IB_WC_LOC_PROT_ERR;
-	wc.opcode = IB_WC_RECV;
-	wc.vendor_err = 0;
-	wc.byte_len = 0;
-	wc.imm_data = 0;
-	wc.qp = &qp->ibqp;
-	wc.src_qp = 0;
-	wc.wc_flags = 0;
-	wc.pkey_index = 0;
-	wc.slid = 0;
-	wc.sl = 0;
-	wc.dlid_path_bits = 0;
-	wc.port_num = 0;
-	/* Signal solicited completion event. */
-	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
-	ret = 0;
-bail:
-	return ret;
-}
-
 /**
  * ipath_ud_loopback - handle send on loopback QPs
- * @sqp: the QP
- * @ss: the SGE state
- * @length: the length of the data to send
- * @wr: the work request
- * @wc: the work completion entry
+ * @sqp: the sending QP
+ * @swqe: the send work request
  *
- * This is called from ipath_post_ud_send() to forward a WQE addressed
+ * This is called from ipath_make_ud_req() to forward a WQE addressed
  * to the same HCA.
  * Note that the receive interrupt handler may be calling ipath_ud_rcv()
  * while this is being called.
  */
-static void ipath_ud_loopback(struct ipath_qp *sqp,
-			      struct ipath_sge_state *ss,
-			      u32 length, struct ib_send_wr *wr,
-			      struct ib_wc *wc)
+static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
 {
 	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
 	struct ipath_qp *qp;
@@ -110,12 +59,18 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
 	struct ipath_rwq *wq;
 	struct ipath_rwqe *wqe;
 	void (*handler)(struct ib_event *, void *);
+	struct ib_wc wc;
 	u32 tail;
 	u32 rlen;
+	u32 length;
 
-	qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn);
-	if (!qp)
-		return;
+	qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn);
+	if (!qp) {
+		dev->n_pkt_drops++;
+		goto send_comp;
+	}
+
+	rsge.sg_list = NULL;
 
 	/*
 	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
@@ -123,39 +78,34 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
 	 * qkey from the QP context instead of the WR (see 10.2.5).
 	 */
 	if (unlikely(qp->ibqp.qp_num &&
-		     ((int) wr->wr.ud.remote_qkey < 0
-		      ? qp->qkey : wr->wr.ud.remote_qkey) != qp->qkey)) {
+		     ((int) swqe->wr.wr.ud.remote_qkey < 0 ?
+		      sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) {
 		/* XXX OK to lose a count once in a while. */
 		dev->qkey_violations++;
 		dev->n_pkt_drops++;
-		goto done;
+		goto drop;
 	}
 
 	/*
 	 * A GRH is expected to preceed the data even if not
 	 * present on the wire.
 	 */
-	wc->byte_len = length + sizeof(struct ib_grh);
+	length = swqe->length;
+	wc.byte_len = length + sizeof(struct ib_grh);
 
-	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
-		wc->wc_flags = IB_WC_WITH_IMM;
-		wc->imm_data = wr->imm_data;
+	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+		wc.wc_flags = IB_WC_WITH_IMM;
+		wc.imm_data = swqe->wr.imm_data;
 	} else {
-		wc->wc_flags = 0;
-		wc->imm_data = 0;
+		wc.wc_flags = 0;
+		wc.imm_data = 0;
 	}
 
-	if (wr->num_sge > 1) {
-		rsge.sg_list = kmalloc((wr->num_sge - 1) *
-				       sizeof(struct ipath_sge),
-				       GFP_ATOMIC);
-	} else
-		rsge.sg_list = NULL;
-
 	/*
-	 * Get the next work request entry to find where to put the data.
-	 * Note that it is safe to drop the lock after changing rq->tail
-	 * since ipath_post_receive() won't fill the empty slot.
+	 * This would be a lot simpler if we could call ipath_get_rwqe()
+	 * but that uses state that the receive interrupt handler uses
+	 * so we would need to lock out receive interrupts while doing
+	 * local loopback.
 	 */
 	if (qp->ibqp.srq) {
 		srq = to_isrq(qp->ibqp.srq);
@@ -167,32 +117,53 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
 		rq = &qp->r_rq;
 	}
 
+	if (rq->max_sge > 1) {
+		/*
+		 * XXX We could use GFP_KERNEL if ipath_do_send()
+		 * was always called from the tasklet instead of
+		 * from ipath_post_send().
+		 */
+		rsge.sg_list = kmalloc((rq->max_sge - 1) *
+					sizeof(struct ipath_sge),
+					GFP_ATOMIC);
+		if (!rsge.sg_list) {
+			dev->n_pkt_drops++;
+			goto drop;
+		}
+	}
+
+	/*
+	 * Get the next work request entry to find where to put the data.
+	 * Note that it is safe to drop the lock after changing rq->tail
+	 * since ipath_post_receive() won't fill the empty slot.
+	 */
	spin_lock_irqsave(&rq->lock, flags);
 	wq = rq->wq;
 	tail = wq->tail;
-	while (1) {
-		if (unlikely(tail == wq->head)) {
-			spin_unlock_irqrestore(&rq->lock, flags);
-			dev->n_pkt_drops++;
-			goto bail_sge;
-		}
-		/* Make sure entry is read after head index is read. */
-		smp_rmb();
-		wqe = get_rwqe_ptr(rq, tail);
-		if (++tail >= rq->size)
-			tail = 0;
-		if (init_sge(qp, wqe, &rlen, &rsge))
-			break;
-		wq->tail = tail;
+	/* Validate tail before using it since it is user writable. */
+	if (tail >= rq->size)
+		tail = 0;
+	if (unlikely(tail == wq->head)) {
+		spin_unlock_irqrestore(&rq->lock, flags);
+		dev->n_pkt_drops++;
+		goto drop;
+	}
+	wqe = get_rwqe_ptr(rq, tail);
+	if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
+		spin_unlock_irqrestore(&rq->lock, flags);
+		dev->n_pkt_drops++;
+		goto drop;
 	}
 	/* Silently drop packets which are too big. */
-	if (wc->byte_len > rlen) {
+	if (wc.byte_len > rlen) {
 		spin_unlock_irqrestore(&rq->lock, flags);
 		dev->n_pkt_drops++;
-		goto bail_sge;
+		goto drop;
 	}
+	if (++tail >= rq->size)
+		tail = 0;
 	wq->tail = tail;
-	wc->wr_id = wqe->wr_id;
+	wc.wr_id = wqe->wr_id;
 	if (handler) {
 		u32 n;
@@ -221,13 +192,13 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
 	} else
 		spin_unlock_irqrestore(&rq->lock, flags);
 
-	ah_attr = &to_iah(wr->wr.ud.ah)->attr;
+	ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
 	if (ah_attr->ah_flags & IB_AH_GRH) {
 		ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
-		wc->wc_flags |= IB_WC_GRH;
+		wc.wc_flags |= IB_WC_GRH;
 	} else
 		ipath_skip_sge(&rsge, sizeof(struct ib_grh));
-	sge = &ss->sge;
+	sge = swqe->sg_list;
 	while (length) {
 		u32 len = sge->length;
@@ -241,8 +212,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
 		sge->length -= len;
 		sge->sge_length -= len;
 		if (sge->sge_length == 0) {
-			if (--ss->num_sge)
-				*sge = *ss->sg_list++;
+			if (--swqe->wr.num_sge)
+				sge++;
 		} else if (sge->length == 0 && sge->mr != NULL) {
 			if (++sge->n >= IPATH_SEGSZ) {
 				if (++sge->m >= sge->mr->mapsz)
@@ -256,123 +227,60 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
 		}
 		length -= len;
 	}
-	wc->status = IB_WC_SUCCESS;
-	wc->opcode = IB_WC_RECV;
-	wc->vendor_err = 0;
-	wc->qp = &qp->ibqp;
-	wc->src_qp = sqp->ibqp.qp_num;
+	wc.status = IB_WC_SUCCESS;
+	wc.opcode = IB_WC_RECV;
+	wc.vendor_err = 0;
+	wc.qp = &qp->ibqp;
+	wc.src_qp = sqp->ibqp.qp_num;
 	/* XXX do we know which pkey matched? Only needed for GSI. */
-	wc->pkey_index = 0;
-	wc->slid = dev->dd->ipath_lid |
+	wc.pkey_index = 0;
+	wc.slid = dev->dd->ipath_lid |
 		(ah_attr->src_path_bits &
-		 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1));
-	wc->sl = ah_attr->sl;
-	wc->dlid_path_bits =
-		ah_attr->dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
+		 ((1 << dev->dd->ipath_lmc) - 1));
+	wc.sl = ah_attr->sl;
+	wc.dlid_path_bits =
+		ah_attr->dlid & ((1 << dev->dd->ipath_lmc) - 1);
+	wc.port_num = 1;
 	/* Signal completion event if the solicited bit is set. */
-	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
-		       wr->send_flags & IB_SEND_SOLICITED);
-
-bail_sge:
+	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+		       swqe->wr.send_flags & IB_SEND_SOLICITED);
+drop:
 	kfree(rsge.sg_list);
-done:
 	if (atomic_dec_and_test(&qp->refcount))
 		wake_up(&qp->wait);
+send_comp:
+	ipath_send_complete(sqp, swqe, IB_WC_SUCCESS);
 }
 
 /**
- * ipath_post_ud_send - post a UD send on QP
+ * ipath_make_ud_req - construct a UD request packet
  * @qp: the QP
- * @wr: the work request
 *
- * Note that we actually send the data as it is posted instead of putting
- * the request into a ring buffer. If we wanted to use a ring buffer,
- * we would need to save a reference to the destination address in the SWQE.
+ * Return 1 if constructed; otherwise, return 0.
 */
-int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
+int ipath_make_ud_req(struct ipath_qp *qp)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ipath_other_headers *ohdr;
 	struct ib_ah_attr *ah_attr;
-	struct ipath_sge_state ss;
-	struct ipath_sge *sg_list;
-	struct ib_wc wc;
-	u32 hwords;
+	struct ipath_swqe *wqe;
 	u32 nwords;
-	u32 len;
 	u32 extra_bytes;
 	u32 bth0;
 	u16 lrh0;
 	u16 lid;
-	int i;
-	int ret;
+	int ret = 0;
 
-	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
-		ret = 0;
+	if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)))
 		goto bail;
-	}
 
-	if (wr->wr.ud.ah->pd != qp->ibqp.pd) {
-		ret = -EPERM;
+	if (qp->s_cur == qp->s_head)
 		goto bail;
-	}
 
-	/* IB spec says that num_sge == 0 is OK. */
-	if (wr->num_sge > qp->s_max_sge) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	if (wr->num_sge > 1) {
-		sg_list = kmalloc((qp->s_max_sge - 1) * sizeof(*sg_list),
-				  GFP_ATOMIC);
-		if (!sg_list) {
-			ret = -ENOMEM;
-			goto bail;
-		}
-	} else
-		sg_list = NULL;
-
-	/* Check the buffer to send. */
-	ss.sg_list = sg_list;
-	ss.sge.mr = NULL;
-	ss.sge.vaddr = NULL;
-	ss.sge.length = 0;
-	ss.sge.sge_length = 0;
-	ss.num_sge = 0;
-	len = 0;
-	for (i = 0; i < wr->num_sge; i++) {
-		/* Check LKEY */
-		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
-			ret = -EINVAL;
-			goto bail;
-		}
-
-		if (wr->sg_list[i].length == 0)
-			continue;
-		if (!ipath_lkey_ok(qp, ss.num_sge ?
-				   sg_list + ss.num_sge - 1 : &ss.sge,
-				   &wr->sg_list[i], 0)) {
-			ret = -EINVAL;
-			goto bail;
-		}
-		len += wr->sg_list[i].length;
-		ss.num_sge++;
-	}
-	/* Check for invalid packet size. */
-	if (len > dev->dd->ipath_ibmtu) {
-		ret = -EINVAL;
-		goto bail;
-	}
-	extra_bytes = (4 - len) & 3;
-	nwords = (len + extra_bytes) >> 2;
+	wqe = get_swqe_ptr(qp, qp->s_cur);
 
 	/* Construct the header. */
-	ah_attr = &to_iah(wr->wr.ud.ah)->attr;
-	if (ah_attr->dlid == 0) {
-		ret = -EINVAL;
-		goto bail;
-	}
+	ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
 	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
 		if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
 			dev->n_multicast_xmit++;
@@ -381,74 +289,63 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 	} else {
 		dev->n_unicast_xmit++;
 		lid = ah_attr->dlid &
-			~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
+			~((1 << dev->dd->ipath_lmc) - 1);
 		if (unlikely(lid == dev->dd->ipath_lid)) {
-			/*
-			 * Pass in an uninitialized ib_wc to save stack
-			 * space.
-			 */
-			ipath_ud_loopback(qp, &ss, len, wr, &wc);
+			ipath_ud_loopback(qp, wqe);
 			goto done;
 		}
 	}
+
+	extra_bytes = -wqe->length & 3;
+	nwords = (wqe->length + extra_bytes) >> 2;
+
+	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
+	qp->s_hdrwords = 7;
+	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+		qp->s_hdrwords++;
+	qp->s_cur_size = wqe->length;
+	qp->s_cur_sge = &qp->s_sge;
+	qp->s_wqe = wqe;
+	qp->s_sge.sge = wqe->sg_list[0];
+	qp->s_sge.sg_list = wqe->sg_list + 1;
+	qp->s_sge.num_sge = wqe->wr.num_sge;
+
 	if (ah_attr->ah_flags & IB_AH_GRH) {
 		/* Header size in 32-bit words. */
-		hwords = 17;
+		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
+						 &ah_attr->grh,
+						 qp->s_hdrwords, nwords);
 		lrh0 = IPATH_LRH_GRH;
 		ohdr = &qp->s_hdr.u.l.oth;
-		qp->s_hdr.u.l.grh.version_tclass_flow =
-			cpu_to_be32((6 << 28) |
-				    (ah_attr->grh.traffic_class << 20) |
-				    ah_attr->grh.flow_label);
-		qp->s_hdr.u.l.grh.paylen =
-			cpu_to_be16(((wr->opcode ==
-				      IB_WR_SEND_WITH_IMM ? 6 : 5) +
-				     nwords + SIZE_OF_CRC) << 2);
-		/* next_hdr is defined by C8-7 in ch. 8.4.1 */
-		qp->s_hdr.u.l.grh.next_hdr = 0x1B;
-		qp->s_hdr.u.l.grh.hop_limit = ah_attr->grh.hop_limit;
-		/* The SGID is 32-bit aligned. */
-		qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
-			dev->gid_prefix;
-		qp->s_hdr.u.l.grh.sgid.global.interface_id =
-			dev->dd->ipath_guid;
-		qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid;
 		/*
 		 * Don't worry about sending to locally attached multicast
 		 * QPs. It is unspecified by the spec. what happens.
 		 */
 	} else {
 		/* Header size in 32-bit words. */
-		hwords = 7;
 		lrh0 = IPATH_LRH_BTH;
 		ohdr = &qp->s_hdr.u.oth;
 	}
-	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
-		ohdr->u.ud.imm_data = wr->imm_data;
-		wc.imm_data = wr->imm_data;
-		hwords += 1;
+	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+		ohdr->u.ud.imm_data = wqe->wr.imm_data;
 		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
-	} else if (wr->opcode == IB_WR_SEND) {
-		wc.imm_data = 0;
+	} else
 		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
-	} else {
-		ret = -EINVAL;
-		goto bail;
-	}
 	lrh0 |= ah_attr->sl << 4;
 	if (qp->ibqp.qp_type == IB_QPT_SMI)
 		lrh0 |= 0xF000;	/* Set VL (see ch. 13.5.3.1) */
 	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
 	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);	/* DEST LID */
-	qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
+	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
+				       SIZE_OF_CRC);
 	lid = dev->dd->ipath_lid;
 	if (lid) {
 		lid |= ah_attr->src_path_bits &
-			((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
+			((1 << dev->dd->ipath_lmc) - 1);
 		qp->s_hdr.lrh[3] = cpu_to_be16(lid);
 	} else
 		qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
-	if (wr->send_flags & IB_SEND_SOLICITED)
+	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
 		bth0 |= 1 << 23;
 	bth0 |= extra_bytes << 20;
 	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
@@ -460,38 +357,20 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 	ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
 		ah_attr->dlid != IPATH_PERMISSIVE_LID ?
 		__constant_cpu_to_be32(IPATH_MULTICAST_QPN) :
-		cpu_to_be32(wr->wr.ud.remote_qpn);
-	/* XXX Could lose a PSN count but not worth locking */
+		cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
 	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
 	/*
 	 * Qkeys with the high order bit set mean use the
 	 * qkey from the QP context instead of the WR (see 10.2.5).
 	 */
-	ohdr->u.ud.deth[0] = cpu_to_be32((int)wr->wr.ud.remote_qkey < 0 ?
-					 qp->qkey : wr->wr.ud.remote_qkey);
+	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
+					 qp->qkey : wqe->wr.wr.ud.remote_qkey);
 	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
 
-	if (ipath_verbs_send(dev->dd, hwords, (u32 *) &qp->s_hdr,
-			     len, &ss))
-		dev->n_no_piobuf++;
-
 done:
-	/* Queue the completion status entry. */
-	if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
-	    (wr->send_flags & IB_SEND_SIGNALED)) {
-		wc.wr_id = wr->wr_id;
-		wc.status = IB_WC_SUCCESS;
-		wc.vendor_err = 0;
-		wc.opcode = IB_WC_SEND;
-		wc.byte_len = len;
-		wc.qp = &qp->ibqp;
-		wc.src_qp = 0;
-		wc.wc_flags = 0;
-		/* XXX initialize other fields? */
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
-	}
-	kfree(sg_list);
-
-	ret = 0;
+	if (++qp->s_cur >= qp->s_size)
+		qp->s_cur = 0;
+	ret = 1;
 
 bail:
 	return ret;
@@ -672,7 +551,8 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 	 * Save the LMC lower bits if the destination LID is a unicast LID.
 	 */
 	wc.dlid_path_bits = dlid >= IPATH_MULTICAST_LID_BASE ? 0 :
-		dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
+		dlid & ((1 << dev->dd->ipath_lmc) - 1);
+	wc.port_num = 1;
 	/* Signal completion event if the solicited bit is set. */
 	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		       (ohdr->bth[0] &
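
A note on the reworked receive-queue handling in ipath_ud_loopback() above: wq->tail lives in memory that userspace can map and overwrite, so the new code clamps it before using it as an array index, and it only publishes the advanced index after the entry has passed the SGE and length checks, so a dropped packet no longer consumes a receive WQE. A minimal stand-alone sketch of that pattern follows; the demo_* names are illustrative, not from the driver.

#include <stdint.h>
#include <stdio.h>

struct demo_wq {
	uint32_t head;		/* producer index, kernel writable */
	uint32_t tail;		/* consumer index, user writable via mmap */
};

struct demo_rq {
	struct demo_wq *wq;
	uint32_t size;		/* number of slots in the ring */
};

/* Claim the next posted slot, or return -1 if the ring is empty. */
static int demo_next_slot(struct demo_rq *rq)
{
	uint32_t tail = rq->wq->tail;
	uint32_t slot;

	/* Clamp a possibly corrupt user-supplied index before use. */
	if (tail >= rq->size)
		tail = 0;
	if (tail == rq->wq->head)
		return -1;			/* nothing posted */
	slot = tail;				/* consume this entry */
	if (++tail >= rq->size)			/* advance with wrap-around */
		tail = 0;
	rq->wq->tail = tail;
	return (int)slot;
}

int main(void)
{
	struct demo_wq wq = { .head = 2, .tail = 0xdeadbeef };
	struct demo_rq rq = { .wq = &wq, .size = 8 };
	int slot;

	/* The bogus tail is clamped to 0; slots 0 and 1 are consumable. */
	while ((slot = demo_next_slot(&rq)) >= 0)
		printf("consumed slot %d\n", slot);
	return 0;
}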
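
In ipath_make_ud_req() the pad computation changes from (4 - len) & 3 to -wqe->length & 3. For unsigned arithmetic the two are equivalent, since 2^32 is a multiple of 4, and both yield the number of bytes needed to round the payload up to a 32-bit boundary. A quick self-contained check (a hypothetical test harness, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t len;

	for (len = 0; len < 65536; len++) {
		uint32_t pad = -len & 3;		/* new form */

		assert(pad == ((4 - len) & 3));		/* old form */
		assert(((len + pad) & 3) == 0);		/* 4-byte aligned */
	}
	return 0;
}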
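
The recurring substitution of dev->dd->ipath_lmc for (dev->mkeyprot_resv_lmc & 7) reads the LMC from a dedicated per-device field instead of a bit-packed one; the masking logic itself is unchanged. The low LMC bits carve out 2^LMC consecutive LIDs for a port, and the path bits pick one of them, as in this illustrative sketch (all values made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t base_lid = 0x40;	/* port base LID, low LMC bits zero */
	uint8_t lmc = 2;		/* 2^2 = 4 LIDs: 0x40..0x43 */
	uint16_t mask = (1 << lmc) - 1;
	uint16_t bits;

	for (bits = 0; bits <= mask; bits++)
		printf("path_bits=%u -> LID=0x%x\n", bits,
		       base_lid | (bits & mask));
	return 0;
}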