author		Michael Brown <mcb30@etherboot.org>	2009-07-06 19:12:12 +0100
committer	Michael Brown <mcb30@etherboot.org>	2009-07-17 23:06:33 +0100
commit		887d296b886aeb5eb9dd67dd85221f64f2683849 (patch)
tree		51d5502ceaafcb56875eb9733efe36700574b334 /src/net/infiniband
parent		1f5c0239b4fc5dea895645397d5aaa3d4b95205a (diff)
download	ipxe-887d296b886aeb5eb9dd67dd85221f64f2683849.tar.gz
[infiniband] Poll completion queues automatically
Currently, all Infiniband users must create a process for polling their
completion queues (or rely on a regular hook such as netdev_poll() in
ipoib.c).

Move instead to a model whereby the Infiniband core maintains a single
process calling ib_poll_eq(), and polling the event queue triggers polls
of the applicable completion queues.  (At present, the Infiniband core
simply polls all of the device's completion queues.)

Polling a completion queue will now implicitly refill all attached
receive work queues; this is analogous to the way that netdev_poll()
implicitly refills the RX ring.

Infiniband users no longer need to create a process just to poll their
completion queues and refill their receive rings.
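[Editorial note: in outline, the new model behaves like the sketch below. One core process steps every device's event queue, and polling the event queue (for now) just polls each of the device's completion queues. This is an illustration, not part of the patch; the names ibdev_step, ib_devices and the ibdev->cqs list member are assumptions.]

#include <gpxe/list.h>
#include <gpxe/process.h>
#include <gpxe/infiniband.h>

/* Assumed list of registered Infiniband devices */
static LIST_HEAD ( ib_devices );

/* Sketch of the core's single polling process */
static void ibdev_step ( struct process *process __unused ) {
	struct ib_device *ibdev;

	/* Poll the event queue of every registered device */
	list_for_each_entry ( ibdev, &ib_devices, list )
		ib_poll_eq ( ibdev );
}

void ib_poll_eq ( struct ib_device *ibdev ) {
	struct ib_completion_queue *cq;

	/* At present, "polling the event queue" simply polls all of
	 * the device's completion queues; each poll also implicitly
	 * refills any attached receive work queues.
	 */
	list_for_each_entry ( cq, &ibdev->cqs, list )
		ib_poll_cq ( ibdev, cq );
}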
Diffstat (limited to 'src/net/infiniband')
-rw-r--r--	src/net/infiniband/ib_qset.c	34
-rw-r--r--	src/net/infiniband/ib_sma.c	53
2 files changed, 1 insertion, 86 deletions
diff --git a/src/net/infiniband/ib_qset.c b/src/net/infiniband/ib_qset.c
index 799489f94..0a1e1f9d2 100644
--- a/src/net/infiniband/ib_qset.c
+++ b/src/net/infiniband/ib_qset.c
@@ -54,9 +54,6 @@ int ib_create_qset ( struct ib_device *ibdev, struct ib_queue_set *qset,
	assert ( qset->cq == NULL );
	assert ( qset->qp == NULL );

-	/* Store queue parameters */
-	qset->recv_max_fill = num_recv_wqes;
-
	/* Allocate completion queue */
	qset->cq = ib_create_cq ( ibdev, num_cqes, cq_op );
	if ( ! qset->cq ) {
@@ -84,37 +81,6 @@ int ib_create_qset ( struct ib_device *ibdev, struct ib_queue_set *qset,
}

/**
- * Refill IPoIB receive ring
- *
- * @v ibdev Infiniband device
- * @v qset Queue set
- */
-void ib_qset_refill_recv ( struct ib_device *ibdev,
-			   struct ib_queue_set *qset ) {
-	struct io_buffer *iobuf;
-	int rc;
-
-	while ( qset->qp->recv.fill < qset->recv_max_fill ) {
-
-		/* Allocate I/O buffer */
-		iobuf = alloc_iob ( IB_MAX_PAYLOAD_SIZE );
-		if ( ! iobuf ) {
-			/* Non-fatal; we will refill on next attempt */
-			return;
-		}
-
-		/* Post I/O buffer */
-		if ( ( rc = ib_post_recv ( ibdev, qset->qp, iobuf ) ) != 0 ) {
-			DBGC ( ibdev, "IBDEV %p could not refill: %s\n",
-			       ibdev, strerror ( rc ) );
-			free_iob ( iobuf );
-			/* Give up */
-			return;
-		}
-	}
-}
-
-/**
* Destroy queue set
*
* @v ibdev Infiniband device
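[Editorial note: the per-user refill loop deleted above moves into the Infiniband core as the generic ib_refill_recv() called later in this diff. Presumably it is the same loop with the fill limit taken from the work queue itself rather than a stored recv_max_fill; the member name qp->recv.num_wqes below is an assumption.]

#include <string.h>
#include <gpxe/iobuf.h>
#include <gpxe/infiniband.h>

/* Sketch of the core's generic receive-ring refill */
void ib_refill_recv ( struct ib_device *ibdev, struct ib_queue_pair *qp ) {
	struct io_buffer *iobuf;
	int rc;

	/* Keep posting until the receive ring is full */
	while ( qp->recv.fill < qp->recv.num_wqes ) {

		/* Allocate I/O buffer */
		iobuf = alloc_iob ( IB_MAX_PAYLOAD_SIZE );
		if ( ! iobuf ) {
			/* Non-fatal; we will refill on the next poll */
			return;
		}

		/* Post I/O buffer */
		if ( ( rc = ib_post_recv ( ibdev, qp, iobuf ) ) != 0 ) {
			DBGC ( ibdev, "IBDEV %p could not refill: %s\n",
			       ibdev, strerror ( rc ) );
			free_iob ( iobuf );
			/* Give up */
			return;
		}
	}
}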
diff --git a/src/net/infiniband/ib_sma.c b/src/net/infiniband/ib_sma.c
index b83d20ea1..5fd1319c5 100644
--- a/src/net/infiniband/ib_sma.c
+++ b/src/net/infiniband/ib_sma.c
@@ -27,7 +27,6 @@ FILE_LICENCE ( GPL2_OR_LATER );
#include <byteswap.h>
#include <gpxe/infiniband.h>
#include <gpxe/iobuf.h>
-#include <gpxe/process.h>
#include <gpxe/ib_sma.h>

/**
@@ -349,36 +348,6 @@ static int ib_sma_mad ( struct ib_sma *sma, union ib_mad *mad ) {
}

/**
- * Refill SMA receive ring
- *
- * @v sma Subnet management agent
- */
-static void ib_sma_refill_recv ( struct ib_sma *sma ) {
-	struct ib_device *ibdev = sma->ibdev;
-	struct io_buffer *iobuf;
-	int rc;
-
-	while ( sma->qp->recv.fill < IB_SMA_NUM_RECV_WQES ) {
-
-		/* Allocate I/O buffer */
-		iobuf = alloc_iob ( IB_MAX_PAYLOAD_SIZE );
-		if ( ! iobuf ) {
-			/* Non-fatal; we will refill on next attempt */
-			return;
-		}
-
-		/* Post I/O buffer */
-		if ( ( rc = ib_post_recv ( ibdev, sma->qp, iobuf ) ) != 0 ) {
-			DBGC ( sma, "SMA %p could not refill: %s\n",
-			       sma, strerror ( rc ) );
-			free_iob ( iobuf );
-			/* Give up */
-			return;
-		}
-	}
-}
-
-/**
* Complete SMA send
*
* @v ibdev		Infiniband device
@@ -457,23 +426,6 @@ static struct ib_completion_queue_operations ib_sma_completion_ops = {
};

/**
- * Poll SMA
- *
- * @v process Process
- */
-static void ib_sma_step ( struct process *process ) {
-	struct ib_sma *sma =
-		container_of ( process, struct ib_sma, poll );
-	struct ib_device *ibdev = sma->ibdev;
-
-	/* Poll the kernel completion queue */
-	ib_poll_cq ( ibdev, sma->cq );
-
-	/* Refill the receive ring */
-	ib_sma_refill_recv ( sma );
-}
-
-/**
* Create SMA
*
* @v sma Subnet management agent
@@ -489,7 +441,6 @@ int ib_create_sma ( struct ib_sma *sma, struct ib_device *ibdev,
	memset ( sma, 0, sizeof ( *sma ) );
	sma->ibdev = ibdev;
	sma->op = op;
-	process_init ( &sma->poll, ib_sma_step, &ibdev->refcnt );

	/* Create completion queue */
	sma->cq = ib_create_cq ( ibdev, IB_SMA_NUM_CQES,
@@ -517,7 +468,7 @@ int ib_create_sma ( struct ib_sma *sma, struct ib_device *ibdev,
	}

	/* Fill receive ring */
-	ib_sma_refill_recv ( sma );
+	ib_refill_recv ( ibdev, sma->qp );

	return 0;

 err_not_qp0:
@@ -525,7 +476,6 @@ int ib_create_sma ( struct ib_sma *sma, struct ib_device *ibdev,
 err_create_qp:
	ib_destroy_cq ( ibdev, sma->cq );
 err_create_cq:
-	process_del ( &sma->poll );
	return rc;
}
@@ -539,5 +489,4 @@ void ib_destroy_sma ( struct ib_sma *sma ) {
	ib_destroy_qp ( ibdev, sma->qp );
	ib_destroy_cq ( ibdev, sma->cq );
-	process_del ( &sma->poll );
}
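[Editorial note: taken together, a consumer of the new model needs no private polling process at all. The fragment below is a hypothetical example, not code from this commit: every my_* name is invented, and it assumes the ib_create_cq() signature visible in this diff plus the five-argument ib_create_qp() of this era. Setup collapses to creating the queues and priming the receive ring once; the core's poller keeps it full thereafter.]

#include <errno.h>
#include <gpxe/infiniband.h>

struct my_thing {
	struct ib_completion_queue *cq;
	struct ib_queue_pair *qp;
};

#define MY_NUM_CQES		8
#define MY_NUM_SEND_WQES	4
#define MY_NUM_RECV_WQES	2

/* Completion handlers would be filled in here, with signatures as
 * per struct ib_completion_queue_operations in gpxe/infiniband.h.
 */
static struct ib_completion_queue_operations my_completion_ops;

static int my_open ( struct my_thing *my, struct ib_device *ibdev ) {

	/* Create completion queue */
	my->cq = ib_create_cq ( ibdev, MY_NUM_CQES, &my_completion_ops );
	if ( ! my->cq )
		return -ENOMEM;

	/* Create queue pair */
	my->qp = ib_create_qp ( ibdev, MY_NUM_SEND_WQES, my->cq,
				MY_NUM_RECV_WQES, my->cq );
	if ( ! my->qp ) {
		ib_destroy_cq ( ibdev, my->cq );
		return -ENOMEM;
	}

	/* Fill receive ring once; subsequent refills happen
	 * implicitly whenever the core polls the completion queue.
	 */
	ib_refill_recv ( ibdev, my->qp );
	return 0;
}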