
[netdevice] Add netdev_tx_defer() to allow drivers to defer transmissions

Devices with small transmit descriptor rings may temporarily run out
of space.  Provide netdev_tx_defer() to allow drivers to defer such
packets; deferred packets are retransmitted automatically as soon as
a descriptor becomes available.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
Michael Brown 2013-05-01 14:05:42 +01:00
parent ab1e3ce0d7
commit 2095ed413e
2 changed files with 91 additions and 3 deletions
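The commit message above describes the driver-facing half of the mechanism. As a rough illustration (not part of this commit), a driver's transmit method might use the new call as sketched below; the ring fields tx_prod/tx_cons, the constant TX_RING_SIZE, struct mydrv_nic and mydrv_fill_descriptor() are all hypothetical names invented for this sketch.

/* Minimal sketch, assuming a simple producer/consumer descriptor
 * ring.  All mydrv_* names and ring fields are hypothetical.
 */
static int mydrv_transmit ( struct net_device *netdev,
			    struct io_buffer *iobuf ) {
	struct mydrv_nic *nic = netdev->priv;

	/* Defer the packet if the descriptor ring is currently full;
	 * the core will retransmit it as soon as the driver calls
	 * netdev_tx_complete() and a descriptor becomes free.
	 */
	if ( ( nic->tx_prod - nic->tx_cons ) >= TX_RING_SIZE ) {
		netdev_tx_defer ( netdev, iobuf );
		return 0;
	}

	/* Fill the next descriptor and notify the hardware */
	mydrv_fill_descriptor ( nic, ( nic->tx_prod++ % TX_RING_SIZE ),
				iobuf );
	return 0;
}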

src/include/ipxe/netdevice.h

@@ -346,6 +346,8 @@ struct net_device {
 	size_t max_pkt_len;
 	/** TX packet queue */
 	struct list_head tx_queue;
+	/** Deferred TX packet queue */
+	struct list_head tx_deferred;
 	/** RX packet queue */
 	struct list_head rx_queue;
 	/** TX statistics */
@@ -605,6 +607,8 @@ netdev_rx_frozen ( struct net_device *netdev ) {
 extern void netdev_link_err ( struct net_device *netdev, int rc );
 extern void netdev_link_down ( struct net_device *netdev );
 extern int netdev_tx ( struct net_device *netdev, struct io_buffer *iobuf );
+extern void netdev_tx_defer ( struct net_device *netdev,
+			      struct io_buffer *iobuf );
 extern void netdev_tx_err ( struct net_device *netdev,
 			    struct io_buffer *iobuf, int rc );
 extern void netdev_tx_complete_err ( struct net_device *netdev,

src/net/netdevice.c

@@ -31,6 +31,7 @@ FILE_LICENCE ( GPL2_OR_LATER );
 #include <ipxe/tables.h>
 #include <ipxe/process.h>
 #include <ipxe/init.h>
+#include <ipxe/malloc.h>
 #include <ipxe/device.h>
 #include <ipxe/errortab.h>
 #include <ipxe/vlan.h>
@@ -212,6 +213,43 @@ int netdev_tx ( struct net_device *netdev, struct io_buffer *iobuf ) {
 	return rc;
 }
 
+/**
+ * Defer transmitted packet
+ *
+ * @v netdev		Network device
+ * @v iobuf		I/O buffer
+ *
+ * Drivers may call netdev_tx_defer() if there is insufficient space
+ * in the transmit descriptor ring.  Any packets deferred in this way
+ * will be automatically retransmitted as soon as space becomes
+ * available (i.e. as soon as the driver calls netdev_tx_complete()).
+ *
+ * The packet must currently be in the network device's TX queue.
+ *
+ * Drivers utilising netdev_tx_defer() must ensure that space in the
+ * transmit descriptor ring is freed up @b before calling
+ * netdev_tx_complete().  For example, if the ring is modelled using a
+ * producer counter and a consumer counter, then the consumer counter
+ * must be incremented before the call to netdev_tx_complete().
+ * Failure to do this will cause the retransmitted packet to be
+ * immediately redeferred (which will result in out-of-order
+ * transmissions and other nastiness).
+ */
+void netdev_tx_defer ( struct net_device *netdev, struct io_buffer *iobuf ) {
+
+	/* Catch data corruption as early as possible */
+	list_check_contains_entry ( iobuf, &netdev->tx_queue, list );
+
+	/* Remove from transmit queue */
+	list_del ( &iobuf->list );
+
+	/* Add to deferred transmit queue */
+	list_add_tail ( &iobuf->list, &netdev->tx_deferred );
+
+	/* Record "out of space" statistic */
+	netdev_tx_err ( netdev, NULL, -ENOBUFS );
+}
+
 /**
  * Discard transmitted packet
  *
@@ -257,6 +295,13 @@ void netdev_tx_complete_err ( struct net_device *netdev,
 	/* Dequeue and free I/O buffer */
 	list_del ( &iobuf->list );
 	netdev_tx_err ( netdev, iobuf, rc );
+
+	/* Transmit first pending packet, if any */
+	if ( ( iobuf = list_first_entry ( &netdev->tx_deferred,
+					  struct io_buffer, list ) ) != NULL ) {
+		list_del ( &iobuf->list );
+		netdev_tx ( netdev, iobuf );
+	}
 }
 
 /**
@@ -270,9 +315,9 @@ void netdev_tx_complete_err ( struct net_device *netdev,
 void netdev_tx_complete_next_err ( struct net_device *netdev, int rc ) {
 	struct io_buffer *iobuf;
 
-	list_for_each_entry ( iobuf, &netdev->tx_queue, list ) {
+	if ( ( iobuf = list_first_entry ( &netdev->tx_queue, struct io_buffer,
+					  list ) ) != NULL ) {
 		netdev_tx_complete_err ( netdev, iobuf, rc );
-		return;
 	}
 }
 
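The doc comment on netdev_tx_defer() above requires that ring space be freed before netdev_tx_complete() is called. A sketch of a driver poll routine obeying that ordering follows (not from this commit; the ring fields and mydrv_descriptor_done() are hypothetical, and netdev_tx_complete_next() is the existing error-free completion wrapper):

/* Minimal sketch: the consumer counter is incremented, freeing a
 * ring slot, before the completion is reported, so that a deferred
 * packet retransmitted from inside netdev_tx_complete_next() is not
 * immediately redeferred.
 */
static void mydrv_poll_tx ( struct net_device *netdev ) {
	struct mydrv_nic *nic = netdev->priv;

	while ( nic->tx_cons != nic->tx_prod ) {
		if ( ! mydrv_descriptor_done ( nic,
					       nic->tx_cons % TX_RING_SIZE ) )
			break;
		/* Free the ring slot *before* completing */
		nic->tx_cons++;
		netdev_tx_complete_next ( netdev );
	}
}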
@@ -283,10 +328,15 @@ void netdev_tx_complete_next_err ( struct net_device *netdev, int rc ) {
  */
 static void netdev_tx_flush ( struct net_device *netdev ) {
 
-	/* Discard any packets in the TX queue */
+	/* Discard any packets in the TX queue.  This will also cause
+	 * any packets in the deferred TX queue to be discarded
+	 * automatically.
+	 */
 	while ( ! list_empty ( &netdev->tx_queue ) ) {
 		netdev_tx_complete_next_err ( netdev, -ECANCELED );
 	}
+	assert ( list_empty ( &netdev->tx_queue ) );
+	assert ( list_empty ( &netdev->tx_deferred ) );
 }
 
 /**
@@ -424,6 +474,7 @@ struct net_device * alloc_netdev ( size_t priv_size ) {
 	ref_init ( &netdev->refcnt, free_netdev );
 	netdev->link_rc = -EUNKNOWN_LINK_STATUS;
 	INIT_LIST_HEAD ( &netdev->tx_queue );
+	INIT_LIST_HEAD ( &netdev->tx_deferred );
 	INIT_LIST_HEAD ( &netdev->rx_queue );
 	netdev_settings_init ( netdev );
 	netdev->priv = ( ( ( void * ) netdev ) + sizeof ( *netdev ) );
@@ -817,3 +868,36 @@ __weak struct net_device * vlan_find ( struct net_device *trunk __unused,
 
 /** Networking stack process */
 PERMANENT_PROCESS ( net_process, net_step );
+
+/**
+ * Discard some cached network device data
+ *
+ * @ret discarded	Number of cached items discarded
+ */
+static unsigned int net_discard ( void ) {
+	struct net_device *netdev;
+	struct io_buffer *iobuf;
+	unsigned int discarded = 0;
+
+	/* Try to drop one deferred TX packet from each network device */
+	for_each_netdev ( netdev ) {
+		if ( ( iobuf = list_first_entry ( &netdev->tx_deferred,
+						  struct io_buffer,
+						  list ) ) != NULL ) {
+
+			/* Discard first deferred packet */
+			list_del ( &iobuf->list );
+			free_iob ( iobuf );
+
+			/* Report discard */
+			discarded++;
+		}
+	}
+
+	return discarded;
+}
+
+/** Network device cache discarder */
+struct cache_discarder net_discarder __cache_discarder ( CACHE_NORMAL ) = {
+	.discard = net_discard,
+};
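The net_discarder registration ties the deferred queue into iPXE's memory-pressure handling: when an allocation fails, the heap invokes registered cache discarders to free cached data before retrying. Any subsystem can hook in the same way; the sketch below shows only the registration pattern, with my_cache_discard() and my_discarder as hypothetical names.

#include <ipxe/malloc.h>

/* Illustrative sketch: free one cached item per call and return the
 * number of items discarded, so the allocator knows whether another
 * retry is worthwhile.
 */
static unsigned int my_cache_discard ( void ) {
	/* Free one cached object here, if any exists */
	return 0;
}

/** Example cache discarder (illustrative only) */
struct cache_discarder my_discarder __cache_discarder ( CACHE_NORMAL ) = {
	.discard = my_cache_discard,
};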