author     Roland Dreier <rolandd@cisco.com>    2007-04-12 21:23:59 -0700
committer  Roland Dreier <rolandd@cisco.com>    2007-04-12 21:23:59 -0700
commit     3cf2867c0df9ed00d44ee4b8d34ea5cd54718ca4 (patch)
tree       b9bf3a340f8b2bfdd100aa01c632d9ae9fc16c52
parent     0701358570278edde42b477842a1cb0e8b0b1864 (diff)
download   libmlx4-3cf2867c0df9ed00d44ee4b8d34ea5cd54718ca4.tar.gz
Implement posting of RDMA and atomic operations
Clean up the definitions of the remote address and atomic operation WQE segments, and add the missing code that fills in these segments when posting RDMA or atomic operations to a send queue.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r--  src/qp.c    50
-rw-r--r--  src/wqe.h   29
2 files changed, 54 insertions(+), 25 deletions(-)
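Before the patch body, a brief illustration may help: for an atomic work request, the send WQE now carries a remote address segment followed by an atomic segment, both converted to big-endian, and the WQE size is counted in 16-byte units. The standalone sketch below is not part of the patch; it lays the two segments out in a local buffer using the struct definitions added to wqe.h. The buffer, the example address and key values, and the local htonll() stand-in are illustrative assumptions, not libmlx4 code.

/*
 * Illustrative sketch only -- not part of the patch. Shows how a
 * remote-address segment and an atomic segment sit back to back in a
 * send WQE for IBV_WR_ATOMIC_CMP_AND_SWP. The seg_buf buffer, the
 * example values, and the local htonll() stand-in are assumptions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>          /* htonl() */

struct mlx4_wqe_raddr_seg {     /* copied from the new wqe.h */
        uint64_t        raddr;
        uint32_t        rkey;
        uint32_t        reserved;
};

struct mlx4_wqe_atomic_seg {    /* copied from the new wqe.h */
        uint64_t        swap_add;
        uint64_t        compare;
};

/*
 * Stand-in for the 64-bit byte-swap helper the patch uses; builds a value
 * whose in-memory representation is big-endian regardless of host order.
 */
static uint64_t htonll(uint64_t x)
{
        uint64_t out;
        unsigned char *p = (unsigned char *) &out;
        int i;

        for (i = 0; i < 8; ++i)
                p[i] = (unsigned char) (x >> (56 - 8 * i));
        return out;
}

int main(void)
{
        unsigned char seg_buf[sizeof (struct mlx4_wqe_raddr_seg) +
                              sizeof (struct mlx4_wqe_atomic_seg)];
        unsigned char *wqe = seg_buf;
        struct mlx4_wqe_raddr_seg raddr;
        struct mlx4_wqe_atomic_seg atomic;

        /* Remote address segment: where on the responder the atomic acts. */
        raddr.raddr    = htonll(0x1000);        /* example remote VA */
        raddr.rkey     = htonl(0x42);           /* example rkey */
        raddr.reserved = 0;
        memcpy(wqe, &raddr, sizeof raddr);
        wqe += sizeof raddr;

        /* Atomic segment: compare-and-swap operands, also big-endian. */
        atomic.swap_add = htonll(1);            /* value to swap in */
        atomic.compare  = htonll(0);            /* expected current value */
        memcpy(wqe, &atomic, sizeof atomic);

        /* WQE sizes are accounted in 16-byte units, as in mlx4_post_send(). */
        printf("raddr + atomic segments = %zu 16-byte chunks\n",
               (sizeof raddr + sizeof atomic) / 16);
        return 0;
}

The patch does the same thing directly into the send queue buffer in mlx4_post_send(), taking the operands from wr->wr.atomic (or wr->wr.rdma for RDMA operations) of the posted work request.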
diff --git a/src/qp.c b/src/qp.c
index 07e1875..81fe191 100644
--- a/src/qp.c
+++ b/src/qp.c
@@ -150,13 +150,43 @@ int mlx4_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
 			switch (wr->opcode) {
 			case IBV_WR_ATOMIC_CMP_AND_SWP:
 			case IBV_WR_ATOMIC_FETCH_AND_ADD:
-				/*XXX*/
+				((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
+					htonll(wr->wr.atomic.remote_addr);
+				((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
+					htonl(wr->wr.atomic.rkey);
+				((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
+
+				wqe += sizeof (struct mlx4_wqe_raddr_seg);
+
+				if (wr->opcode == IBV_WR_ATOMIC_CMP_AND_SWP) {
+					((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
+						htonll(wr->wr.atomic.swap);
+					((struct mlx4_wqe_atomic_seg *) wqe)->compare =
+						htonll(wr->wr.atomic.compare_add);
+				} else {
+					((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
+						htonll(wr->wr.atomic.compare_add);
+					((struct mlx4_wqe_atomic_seg *) wqe)->compare = 0;
+				}
+
+				wqe += sizeof (struct mlx4_wqe_atomic_seg);
+				size += (sizeof (struct mlx4_wqe_raddr_seg) +
+					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;
+
 				break;
 
 			case IBV_WR_RDMA_WRITE:
 			case IBV_WR_RDMA_WRITE_WITH_IMM:
 			case IBV_WR_RDMA_READ:
-				/*XXX*/
+				((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
+					htonll(wr->wr.rdma.remote_addr);
+				((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
+					htonl(wr->wr.rdma.rkey);
+				((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
+
+				wqe += sizeof (struct mlx4_wqe_raddr_seg);
+				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
+
 				break;
 
 			default:
@@ -334,20 +364,20 @@ int mlx4_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
 		break;
 
 	case IBV_QPT_UC:
-		size += sizeof (struct mlx4_raddr_seg);
+		size += sizeof (struct mlx4_wqe_raddr_seg);
 		break;
 
 	case IBV_QPT_RC:
-		size += sizeof (struct mlx4_raddr_seg);
+		size += sizeof (struct mlx4_wqe_raddr_seg);
 		/*
 		 * An atomic op will require an atomic segment, a
 		 * remote address segment and one scatter entry.
 		 */
-		if (size < (sizeof (struct mlx4_atomic_seg) +
-			    sizeof (struct mlx4_raddr_seg) +
+		if (size < (sizeof (struct mlx4_wqe_atomic_seg) +
+			    sizeof (struct mlx4_wqe_raddr_seg) +
 			    sizeof (struct mlx4_wqe_data_seg)))
-			size = (sizeof (struct mlx4_atomic_seg) +
-				sizeof (struct mlx4_raddr_seg) +
+			size = (sizeof (struct mlx4_wqe_atomic_seg) +
+				sizeof (struct mlx4_wqe_raddr_seg) +
 				sizeof (struct mlx4_wqe_data_seg));
 		break;
 
@@ -356,8 +386,8 @@ int mlx4_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
 	}
 
 	/* Make sure that we have enough space for a bind request */
-	if (size < sizeof (struct mlx4_bind_seg))
-		size = sizeof (struct mlx4_bind_seg);
+	if (size < sizeof (struct mlx4_wqe_bind_seg))
+		size = sizeof (struct mlx4_wqe_bind_seg);
 
 	size += sizeof (struct mlx4_wqe_ctrl_seg);
 
diff --git a/src/wqe.h b/src/wqe.h
index b19a31a..ee649b9 100644
--- a/src/wqe.h
+++ b/src/wqe.h
@@ -96,25 +96,24 @@ struct mlx4_wqe_srq_next_seg {
 	uint32_t	reserved2[3];
 };
 
-/* XXX the rest of these are still old WQE formats... */
-struct mlx4_bind_seg {
-	uint32_t	flags; /* [31] Atomic [30] rem write [29] rem read */
-	uint32_t	reserved;
-	uint32_t	new_rkey;
-	uint32_t	lkey;
-	uint64_t	addr;
-	uint64_t	length;
+struct mlx4_wqe_raddr_seg {
+	uint64_t	raddr;
+	uint32_t	rkey;
+	uint32_t	reserved;
 };
 
-struct mlx4_raddr_seg {
-	uint64_t	raddr;
-	uint32_t	rkey;
-	uint32_t	reserved;
+struct mlx4_wqe_atomic_seg {
+	uint64_t	swap_add;
+	uint64_t	compare;
 };
 
-struct mlx4_atomic_seg {
-	uint64_t	swap_add;
-	uint64_t	compare;
+struct mlx4_wqe_bind_seg {
+	uint32_t	flags1;
+	uint32_t	flags2;
+	uint32_t	new_rkey;
+	uint32_t	lkey;
+	uint64_t	addr;
+	uint64_t	length;
 };
 
 #endif /* WQE_H */
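A closing note on the size arithmetic: mlx4_post_send() counts the WQE size in 16-byte units (the divisions by 16 in the first hunk), which relies on each segment occupying a whole number of 16-byte chunks. The compile-time check below is a standalone sketch, not part of the patch (it needs a C11 compiler for _Static_assert); it verifies that property for the three structures defined above.

/* Standalone sanity check, illustrative only -- not part of the patch.
 * The structs are copied from the new wqe.h; the asserts verify the
 * 16-byte-unit assumption made by the size accounting in qp.c. */
#include <stdint.h>

struct mlx4_wqe_raddr_seg {
        uint64_t        raddr;
        uint32_t        rkey;
        uint32_t        reserved;
};

struct mlx4_wqe_atomic_seg {
        uint64_t        swap_add;
        uint64_t        compare;
};

struct mlx4_wqe_bind_seg {
        uint32_t        flags1;
        uint32_t        flags2;
        uint32_t        new_rkey;
        uint32_t        lkey;
        uint64_t        addr;
        uint64_t        length;
};

_Static_assert(sizeof (struct mlx4_wqe_raddr_seg) % 16 == 0,
               "raddr segment must fill whole 16-byte chunks");
_Static_assert(sizeof (struct mlx4_wqe_atomic_seg) % 16 == 0,
               "atomic segment must fill whole 16-byte chunks");
_Static_assert(sizeof (struct mlx4_wqe_bind_seg) % 16 == 0,
               "bind segment must fill whole 16-byte chunks");

int main(void)
{
        return 0;       /* nothing to do at run time; the checks are compile-time */
}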