2
0
mirror of https://github.com/xcat2/xNBA.git synced 2025-04-13 16:57:25 +00:00

[infiniband] Centralise assumption of 2048-byte payloads

IPoIB and the SMA have separate constants for the packet size to be
used for I/O buffer allocations.  Merge these into a single
IB_MAX_PAYLOAD_SIZE constant.

(Various other points in the Infiniband stack have hard-coded
assumptions of a 2048-byte payload; we don't currently support
variable MTUs.)
This commit is contained in:
Michael Brown 2009-07-17 22:50:33 +01:00
parent 7ba33f7826
commit 1f5c0239b4
8 changed files with 22 additions and 18 deletions

View File

@ -838,7 +838,7 @@ static int ipoib_open ( struct net_device *netdev ) {
IPOIB_META_NUM_CQES, &ipoib_meta_cq_op,
IPOIB_META_NUM_SEND_WQES,
IPOIB_META_NUM_RECV_WQES,
IPOIB_PKT_LEN, IB_GLOBAL_QKEY ) ) != 0 ) {
IB_GLOBAL_QKEY ) ) != 0 ) {
DBGC ( ipoib, "IPoIB %p could not allocate metadata QP: %s\n",
ipoib, strerror ( rc ) );
goto err_create_meta_qset;
@ -850,7 +850,7 @@ static int ipoib_open ( struct net_device *netdev ) {
IPOIB_DATA_NUM_CQES, &ipoib_data_cq_op,
IPOIB_DATA_NUM_SEND_WQES,
IPOIB_DATA_NUM_RECV_WQES,
IPOIB_PKT_LEN, IB_GLOBAL_QKEY ) ) != 0 ) {
IB_GLOBAL_QKEY ) ) != 0 ) {
DBGC ( ipoib, "IPoIB %p could not allocate data QP: %s\n",
ipoib, strerror ( rc ) );
goto err_create_data_qset;
@ -1040,7 +1040,7 @@ struct net_device * alloc_ipoibdev ( size_t priv_size ) {
if ( netdev ) {
netdev->ll_protocol = &ipoib_protocol;
netdev->ll_broadcast = ( uint8_t * ) &ipoib_broadcast;
netdev->max_pkt_len = IPOIB_PKT_LEN;
netdev->max_pkt_len = IB_MAX_PAYLOAD_SIZE;
}
return netdev;
}

View File

@ -20,16 +20,13 @@ struct ib_queue_set {
struct ib_queue_pair *qp;
/** Receive work queue maximum fill level */
unsigned int recv_max_fill;
/** Receive packet length */
size_t recv_pkt_len;
};
extern int ib_create_qset ( struct ib_device *ibdev,
struct ib_queue_set *qset, unsigned int num_cqes,
struct ib_completion_queue_operations *cq_op,
unsigned int num_send_wqes,
unsigned int num_recv_wqes, size_t recv_pkt_len,
unsigned long qkey );
unsigned int num_recv_wqes, unsigned long qkey );
extern void ib_qset_refill_recv ( struct ib_device *ibdev,
struct ib_queue_set *qset );
extern void ib_destroy_qset ( struct ib_device *ibdev,

View File

@ -37,9 +37,6 @@ struct ib_sma {
struct process poll;
};
/** SMA payload size allocated for received packets */
#define IB_SMA_PAYLOAD_LEN 2048
/** SMA number of send WQEs
*
* This is a policy decision.

View File

@ -24,6 +24,14 @@ FILE_LICENCE ( GPL2_OR_LATER );
/** Subnet administrator queue key */
#define IB_GLOBAL_QKEY 0x80010000UL
/**
* Maximum payload size
*
* This is currently hard-coded in various places (drivers, subnet
* management agent, etc.) to 2048.
*/
#define IB_MAX_PAYLOAD_SIZE 2048
struct ib_device;
struct ib_queue_pair;
struct ib_address_vector;

View File

@ -10,9 +10,6 @@ FILE_LICENCE ( GPL2_OR_LATER );
#include <gpxe/infiniband.h>
/** IPoIB packet length */
#define IPOIB_PKT_LEN 2048
/** IPoIB MAC address length */
#define IPOIB_ALEN 20

View File

@ -345,6 +345,13 @@ int ib_post_recv ( struct ib_device *ibdev, struct ib_queue_pair *qp,
struct io_buffer *iobuf ) {
int rc;
/* Check packet length */
if ( iob_tailroom ( iobuf ) < IB_MAX_PAYLOAD_SIZE ) {
DBGC ( ibdev, "IBDEV %p QPN %#lx wrong RX buffer size (%zd)\n",
ibdev, qp->qpn, iob_tailroom ( iobuf ) );
return -EINVAL;
}
/* Check queue fill level */
if ( qp->recv.fill >= qp->recv.num_wqes ) {
DBGC ( ibdev, "IBDEV %p QPN %#lx receive queue full\n",

View File

@ -40,7 +40,6 @@ FILE_LICENCE ( GPL2_OR_LATER );
* @v cq_op Completion queue operations
* @v num_send_wqes Number of send work queue entries
* @v num_recv_wqes Number of receive work queue entries
* @v recv_pkt_len Receive packet length
* @v qkey Queue key
* @ret rc Return status code
*/
@ -48,7 +47,7 @@ int ib_create_qset ( struct ib_device *ibdev, struct ib_queue_set *qset,
unsigned int num_cqes,
struct ib_completion_queue_operations *cq_op,
unsigned int num_send_wqes, unsigned int num_recv_wqes,
size_t recv_pkt_len, unsigned long qkey ) {
unsigned long qkey ) {
int rc;
/* Sanity check */
@ -57,7 +56,6 @@ int ib_create_qset ( struct ib_device *ibdev, struct ib_queue_set *qset,
/* Store queue parameters */
qset->recv_max_fill = num_recv_wqes;
qset->recv_pkt_len = recv_pkt_len;
/* Allocate completion queue */
qset->cq = ib_create_cq ( ibdev, num_cqes, cq_op );
@ -99,7 +97,7 @@ void ib_qset_refill_recv ( struct ib_device *ibdev,
while ( qset->qp->recv.fill < qset->recv_max_fill ) {
/* Allocate I/O buffer */
iobuf = alloc_iob ( qset->recv_pkt_len );
iobuf = alloc_iob ( IB_MAX_PAYLOAD_SIZE );
if ( ! iobuf ) {
/* Non-fatal; we will refill on next attempt */
return;

View File

@ -361,7 +361,7 @@ static void ib_sma_refill_recv ( struct ib_sma *sma ) {
while ( sma->qp->recv.fill < IB_SMA_NUM_RECV_WQES ) {
/* Allocate I/O buffer */
iobuf = alloc_iob ( IB_SMA_PAYLOAD_LEN );
iobuf = alloc_iob ( IB_MAX_PAYLOAD_SIZE );
if ( ! iobuf ) {
/* Non-fatal; we will refill on next attempt */
return;