diff options
author | Michael S. Tsirkin <mst@mellanox.co.il> | 2005-05-13 20:40:53 +0000 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2006-11-09 19:57:01 -0800 |
commit | 95cd7c74934fd45ec6cc7f1efdf1d1f78f8b6447 (patch) | |
tree | fadc268e51cd7c32206679f1d2c73d69394f6f03 | |
parent | e4ebb31fb433a4175f3c9f8d41b32a64e0002288 (diff) | |
download | libmthca-95cd7c74934fd45ec6cc7f1efdf1d1f78f8b6447.tar.gz |
Size send WQEs based on max_inline_data
Use the consumer's cap.max_inline_data value to size send-queue work
requests, and return the actual supported inline-data size to the
consumer via the same structure.
Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <roland@topspin.com>
-rw-r--r-- | src/mthca.h | 5 | ||||
-rw-r--r-- | src/qp.c | 42 | ||||
-rw-r--r-- | src/verbs.c | 6 |
3 files changed, 48 insertions, 5 deletions
diff --git a/src/mthca.h b/src/mthca.h index e292a30..d98e58d 100644 --- a/src/mthca.h +++ b/src/mthca.h @@ -284,7 +284,10 @@ extern int mthca_arbel_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr, struct ibv_send_wr **bad_wr); extern int mthca_arbel_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr, struct ibv_recv_wr **bad_wr); -extern int mthca_alloc_qp_buf(struct ibv_pd *pd, struct mthca_qp *qp); +extern int mthca_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap, + struct mthca_qp *qp); +extern void mthca_return_cap(struct ibv_pd *pd, struct mthca_qp *qp, + struct ibv_qp_cap *cap); extern struct mthca_qp *mthca_find_qp(struct mthca_context *ctx, uint32_t qpn); extern int mthca_store_qp(struct mthca_context *ctx, uint32_t qpn, struct mthca_qp *qp); extern void mthca_clear_qp(struct mthca_context *ctx, uint32_t qpn); @@ -718,10 +718,17 @@ out: return ret; } -int mthca_alloc_qp_buf(struct ibv_pd *pd, struct mthca_qp *qp) +int mthca_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap, + struct mthca_qp *qp) { int size; + qp->rq.max_gs = cap->max_recv_sge; + qp->sq.max_gs = align(cap->max_inline_data + sizeof (struct mthca_inline_seg), + sizeof (struct mthca_data_seg)) / sizeof (struct mthca_data_seg); + if (qp->sq.max_gs < cap->max_send_sge) + qp->sq.max_gs = cap->max_send_sge; + qp->wrid = malloc((qp->rq.max + qp->sq.max) * sizeof (uint64_t)); if (!qp->wrid) return -1; @@ -796,6 +803,39 @@ int mthca_alloc_qp_buf(struct ibv_pd *pd, struct mthca_qp *qp) return 0; } +void mthca_return_cap(struct ibv_pd *pd, struct mthca_qp *qp, struct ibv_qp_cap *cap) +{ + /* + * Maximum inline data size is the full WQE size less the size + * of the next segment, inline segment and other non-data segments. 
+ */ + cap->max_inline_data = (1 << qp->sq.wqe_shift) - + sizeof (struct mthca_next_seg) - + sizeof (struct mthca_inline_seg); + + switch (qp->qpt) { + case IBV_QPT_UD: + if (mthca_is_memfree(pd->context)) + cap->max_inline_data -= sizeof (struct mthca_arbel_ud_seg); + else + cap->max_inline_data -= sizeof (struct mthca_tavor_ud_seg); + break; + + default: + /* + * inline data won't be used in the same WQE as an + * atomic or bind segment, so we don't have to + * subtract anything off here. + */ + break; + } + + cap->max_send_wr = qp->sq.max; + cap->max_recv_wr = qp->rq.max; + cap->max_send_sge = qp->sq.max_gs; + cap->max_recv_sge = qp->rq.max_gs; +} + struct mthca_qp *mthca_find_qp(struct mthca_context *ctx, uint32_t qpn) { int tind = (qpn & (ctx->num_qps - 1)) >> ctx->qp_table_shift; diff --git a/src/verbs.c b/src/verbs.c index 65ecb67..420db5c 100644 --- a/src/verbs.c +++ b/src/verbs.c @@ -270,7 +270,6 @@ struct ibv_qp *mthca_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr) qp->qpt = attr->qp_type; qp->sq.max = align_qp_size(pd->context, attr->cap.max_send_wr); - qp->sq.max_gs = attr->cap.max_send_sge; qp->sq.next_ind = 0; qp->sq.last_comp = qp->sq.max - 1; qp->sq.head = 0; @@ -278,14 +277,13 @@ struct ibv_qp *mthca_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr) qp->sq.last = NULL; qp->rq.max = align_qp_size(pd->context, attr->cap.max_recv_wr); - qp->rq.max_gs = attr->cap.max_recv_sge; qp->rq.next_ind = 0; qp->rq.last_comp = qp->rq.max - 1; qp->rq.head = 0; qp->rq.tail = 0; qp->rq.last = NULL; - if (mthca_alloc_qp_buf(pd, qp)) + if (mthca_alloc_qp_buf(pd, &attr->cap, qp)) goto err; if (pthread_spin_init(&qp->sq.lock, PTHREAD_PROCESS_PRIVATE) || @@ -332,6 +330,8 @@ struct ibv_qp *mthca_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr) if (ret) goto err_destroy; + mthca_return_cap(pd, qp, &attr->cap); + return &qp->ibv_qp; err_destroy: |