netdev: Fix lockdep warnings in multiqueue configurations.
When support for multiple TX queues was added, the netif_tx_lock()
routines were converted to iterate over all TX queues and grab each
queue's spinlock.  This causes heartburn for lockdep and it's not a
healthy thing to do with lots of TX queues anyway.

So modify this to use a top-level lock and a "frozen" state for the
individual TX queues.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit c3f26a269c
parent 967ab999a0
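Before reading the diff, here is a minimal user-space sketch of the locking scheme the patch introduces. It is not kernel code: the pthread/stdatomic types and the names tx_freeze, tx_unfreeze, try_xmit, struct txq and struct netdev are illustrative stand-ins for dev->tx_global_lock, the per-queue _xmit_lock and the __QUEUE_STATE_FROZEN bit.

/* Sketch of the "global lock + per-queue frozen flag" scheme.
 * A sender only ever holds one queue lock; the freezer takes each
 * queue lock only briefly, so no path holds all N locks at once.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define NUM_TX_QUEUES 4

struct txq {
	pthread_mutex_t xmit_lock;	/* analogue of the per-queue _xmit_lock */
	atomic_bool frozen;		/* analogue of __QUEUE_STATE_FROZEN */
};

struct netdev {
	pthread_mutex_t tx_global_lock;	/* serializes freezers only */
	struct txq txq[NUM_TX_QUEUES];
};

static void netdev_init(struct netdev *dev)
{
	pthread_mutex_init(&dev->tx_global_lock, NULL);
	for (int i = 0; i < NUM_TX_QUEUES; i++) {
		pthread_mutex_init(&dev->txq[i].xmit_lock, NULL);
		atomic_init(&dev->txq[i].frozen, false);
	}
}

/* Freeze: take each queue lock briefly so we synchronize with senders
 * that already sampled the frozen flag, set the flag, and drop the lock. */
static void tx_freeze(struct netdev *dev)
{
	pthread_mutex_lock(&dev->tx_global_lock);
	for (int i = 0; i < NUM_TX_QUEUES; i++) {
		pthread_mutex_lock(&dev->txq[i].xmit_lock);
		atomic_store(&dev->txq[i].frozen, true);
		pthread_mutex_unlock(&dev->txq[i].xmit_lock);
	}
}

/* Unfreeze: clearing the flag needs no queue lock, mirroring the
 * "No need to grab the _xmit_lock here" comment in the patch. */
static void tx_unfreeze(struct netdev *dev)
{
	for (int i = 0; i < NUM_TX_QUEUES; i++)
		atomic_store(&dev->txq[i].frozen, false);
	pthread_mutex_unlock(&dev->tx_global_lock);
}

/* Transmit path: take only this queue's lock and back off if frozen. */
static bool try_xmit(struct netdev *dev, int q)
{
	struct txq *txq = &dev->txq[q];
	bool sent = false;

	pthread_mutex_lock(&txq->xmit_lock);
	if (!atomic_load(&txq->frozen))
		sent = true;	/* a real driver would call ->hard_start_xmit() here */
	pthread_mutex_unlock(&txq->xmit_lock);
	return sent;
}

The design point, as the commit message says, is that lockdep only ever sees one queue lock held at a time; the old netif_tx_lock() acquired every queue's spinlock simultaneously, which both annoyed lockdep and scaled badly with many TX queues.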
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -69,18 +69,20 @@ static void ri_tasklet(unsigned long dev)
 	struct net_device *_dev = (struct net_device *)dev;
 	struct ifb_private *dp = netdev_priv(_dev);
 	struct net_device_stats *stats = &_dev->stats;
+	struct netdev_queue *txq;
 	struct sk_buff *skb;
 
+	txq = netdev_get_tx_queue(_dev, 0);
 	dp->st_task_enter++;
 	if ((skb = skb_peek(&dp->tq)) == NULL) {
 		dp->st_txq_refl_try++;
-		if (netif_tx_trylock(_dev)) {
+		if (__netif_tx_trylock(txq)) {
 			dp->st_rxq_enter++;
 			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
 				skb_queue_tail(&dp->tq, skb);
 				dp->st_rx2tx_tran++;
 			}
-			netif_tx_unlock(_dev);
+			__netif_tx_unlock(txq);
 		} else {
 			/* reschedule */
 			dp->st_rxq_notenter++;
@@ -115,7 +117,7 @@ static void ri_tasklet(unsigned long dev)
 			BUG();
 	}
 
-	if (netif_tx_trylock(_dev)) {
+	if (__netif_tx_trylock(txq)) {
 		dp->st_rxq_check++;
 		if ((skb = skb_peek(&dp->rq)) == NULL) {
 			dp->tasklet_pending = 0;
@@ -123,10 +125,10 @@ static void ri_tasklet(unsigned long dev)
 				netif_wake_queue(_dev);
 		} else {
 			dp->st_rxq_rsch++;
-			netif_tx_unlock(_dev);
+			__netif_tx_unlock(txq);
 			goto resched;
 		}
-		netif_tx_unlock(_dev);
+		__netif_tx_unlock(txq);
 	} else {
 resched:
 		dp->tasklet_pending = 1;
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -440,6 +440,7 @@ static inline void napi_synchronize(const struct napi_struct *n)
 enum netdev_queue_state_t
 {
 	__QUEUE_STATE_XOFF,
+	__QUEUE_STATE_FROZEN,
 };
 
 struct netdev_queue {
@@ -636,7 +637,7 @@ struct net_device
 	unsigned int		real_num_tx_queues;
 
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
-
+	spinlock_t		tx_global_lock;
 	/*
 	 * One part is mostly used on xmit path (device)
 	 */
@@ -1099,6 +1100,11 @@ static inline int netif_queue_stopped(const struct net_device *dev)
 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
+static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+{
+	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+}
+
 /**
  *	netif_running - test if up
  *	@dev: network device
@@ -1475,30 +1481,6 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 	txq->xmit_lock_owner = smp_processor_id();
 }
 
-/**
- *	netif_tx_lock - grab network device transmit lock
- *	@dev: network device
- *	@cpu: cpu number of lock owner
- *
- * Get network device transmit lock
- */
-static inline void netif_tx_lock(struct net_device *dev)
-{
-	int cpu = smp_processor_id();
-	unsigned int i;
-
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		__netif_tx_lock(txq, cpu);
-	}
-}
-
-static inline void netif_tx_lock_bh(struct net_device *dev)
-{
-	local_bh_disable();
-	netif_tx_lock(dev);
-}
-
 static inline int __netif_tx_trylock(struct netdev_queue *txq)
 {
 	int ok = spin_trylock(&txq->_xmit_lock);
@@ -1507,11 +1489,6 @@ static inline int __netif_tx_trylock(struct netdev_queue *txq)
 	return ok;
 }
 
-static inline int netif_tx_trylock(struct net_device *dev)
-{
-	return __netif_tx_trylock(netdev_get_tx_queue(dev, 0));
-}
-
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
 	txq->xmit_lock_owner = -1;
@@ -1524,15 +1501,57 @@ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 	spin_unlock_bh(&txq->_xmit_lock);
 }
 
+/**
+ *	netif_tx_lock - grab network device transmit lock
+ *	@dev: network device
+ *	@cpu: cpu number of lock owner
+ *
+ * Get network device transmit lock
+ */
+static inline void netif_tx_lock(struct net_device *dev)
+{
+	unsigned int i;
+	int cpu;
+
+	spin_lock(&dev->tx_global_lock);
+	cpu = smp_processor_id();
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		/* We are the only thread of execution doing a
+		 * freeze, but we have to grab the _xmit_lock in
+		 * order to synchronize with threads which are in
+		 * the ->hard_start_xmit() handler and already
+		 * checked the frozen bit.
+		 */
+		__netif_tx_lock(txq, cpu);
+		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
+		__netif_tx_unlock(txq);
+	}
+}
+
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+	local_bh_disable();
+	netif_tx_lock(dev);
+}
+
 static inline void netif_tx_unlock(struct net_device *dev)
 {
 	unsigned int i;
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		__netif_tx_unlock(txq);
-	}
 
+		/* No need to grab the _xmit_lock here.  If the
+		 * queue is not stopped for another reason, we
+		 * force a schedule.
+		 */
+		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+		if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+			__netif_schedule(txq->qdisc);
+	}
+	spin_unlock(&dev->tx_global_lock);
 }
 
 static inline void netif_tx_unlock_bh(struct net_device *dev)
@@ -1556,13 +1575,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 static inline void netif_tx_disable(struct net_device *dev)
 {
 	unsigned int i;
+	int cpu;
 
-	netif_tx_lock_bh(dev);
+	local_bh_disable();
+	cpu = smp_processor_id();
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		__netif_tx_lock(txq, cpu);
 		netif_tx_stop_queue(txq);
+		__netif_tx_unlock(txq);
 	}
-	netif_tx_unlock_bh(dev);
+	local_bh_enable();
 }
 
 static inline void netif_addr_lock(struct net_device *dev)
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4200,6 +4200,7 @@ static void netdev_init_queues(struct net_device *dev)
 {
 	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+	spin_lock_init(&dev->tx_global_lock);
 }
 
 /**
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -70,6 +70,7 @@ static void queue_process(struct work_struct *work)
 		local_irq_save(flags);
 		__netif_tx_lock(txq, smp_processor_id());
 		if (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq) ||
 		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			__netif_tx_unlock(txq);
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3305,6 +3305,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	txq = netdev_get_tx_queue(odev, queue_map);
 	if (netif_tx_queue_stopped(txq) ||
+	    netif_tx_queue_frozen(txq) ||
 	    need_resched()) {
 		idle_start = getCurUs();
 
|
|||||||
|
|
||||||
pkt_dev->idle_acc += getCurUs() - idle_start;
|
pkt_dev->idle_acc += getCurUs() - idle_start;
|
||||||
|
|
||||||
if (netif_tx_queue_stopped(txq)) {
|
if (netif_tx_queue_stopped(txq) ||
|
||||||
|
netif_tx_queue_frozen(txq)) {
|
||||||
pkt_dev->next_tx_us = getCurUs(); /* TODO */
|
pkt_dev->next_tx_us = getCurUs(); /* TODO */
|
||||||
pkt_dev->next_tx_ns = 0;
|
pkt_dev->next_tx_ns = 0;
|
||||||
goto out; /* Try the next interface */
|
goto out; /* Try the next interface */
|
||||||
@ -3352,7 +3354,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
|
|||||||
txq = netdev_get_tx_queue(odev, queue_map);
|
txq = netdev_get_tx_queue(odev, queue_map);
|
||||||
|
|
||||||
__netif_tx_lock_bh(txq);
|
__netif_tx_lock_bh(txq);
|
||||||
if (!netif_tx_queue_stopped(txq)) {
|
if (!netif_tx_queue_stopped(txq) &&
|
||||||
|
!netif_tx_queue_frozen(txq)) {
|
||||||
|
|
||||||
atomic_inc(&(pkt_dev->skb->users));
|
atomic_inc(&(pkt_dev->skb->users));
|
||||||
retry_now:
|
retry_now:
|
||||||
|
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -135,7 +135,8 @@ static inline int qdisc_restart(struct Qdisc *q)
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_subqueue_stopped(dev, skb))
+	if (!netif_tx_queue_stopped(txq) &&
+	    !netif_tx_queue_frozen(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 	HARD_TX_UNLOCK(dev, txq);
 
@@ -162,7 +163,8 @@ static inline int qdisc_restart(struct Qdisc *q)
 		break;
 	}
 
-	if (ret && netif_tx_queue_stopped(txq))
+	if (ret && (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq)))
 		ret = 0;
 
 	return ret;
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -305,10 +305,11 @@ restart:
 
 		switch (teql_resolve(skb, skb_res, slave)) {
 		case 0:
-			if (netif_tx_trylock(slave)) {
-				if (!__netif_subqueue_stopped(slave, subq) &&
+			if (__netif_tx_trylock(slave_txq)) {
+				if (!netif_tx_queue_stopped(slave_txq) &&
+				    !netif_tx_queue_frozen(slave_txq) &&
 				    slave->hard_start_xmit(skb, slave) == 0) {
-					netif_tx_unlock(slave);
+					__netif_tx_unlock(slave_txq);
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
 					master->stats.tx_packets++;
@@ -316,7 +317,7 @@ restart:
 						qdisc_pkt_len(skb);
 					return 0;
 				}
-				netif_tx_unlock(slave);
+				__netif_tx_unlock(slave_txq);
 			}
 			if (netif_queue_stopped(dev))
 				busy = 1;