	net_sched: Add qdisc_enqueue wrapper
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent db7a94d60f
commit 5f86173bdf

12 changed files with 37 additions and 21 deletions
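The conversion itself is mechanical: every direct call through a qdisc's enqueue operation now goes through the new qdisc_enqueue() helper, with qdisc_enqueue_root() used where the caller holds the root qdisc, presumably so that common enqueue-path behaviour can later be added in a single place. A minimal sketch of the call-site change, with a hypothetical child qdisc pointer named 'child' used only for illustration:

/* Call-site sketch only; 'child' is a hypothetical child qdisc pointer. */

/* before: invoke the qdisc's enqueue operation directly */
ret = child->enqueue(skb, child);

/* after: go through the wrapper, which for now simply forwards the call */
ret = qdisc_enqueue(skb, child);
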
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
@@ -306,6 +306,16 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev)
 	return true;
 }
 
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return sch->enqueue(skb, sch);
+}
+
+static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return qdisc_enqueue(skb, sch);
+}
+
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 				       struct sk_buff_head *list)
 {
diff --git a/net/core/dev.c b/net/core/dev.c
@@ -1781,7 +1781,7 @@ gso:
 
 		spin_lock(root_lock);
 
-		rc = q->enqueue(skb, q);
+		rc = qdisc_enqueue_root(skb, q);
 		qdisc_run(q);
 
 		spin_unlock(root_lock);
@@ -2083,7 +2083,7 @@ static int ing_filter(struct sk_buff *skb)
 	q = rxq->qdisc;
 	if (q) {
 		spin_lock(qdisc_lock(q));
-		result = q->enqueue(skb, q);
+		result = qdisc_enqueue_root(skb, q);
 		spin_unlock(qdisc_lock(q));
 	}
 
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
@@ -289,7 +289,7 @@ void ieee80211_requeue(struct ieee80211_local *local, int queue)
 		root_lock = qdisc_root_lock(qdisc);
 
 		spin_lock(root_lock);
-		qdisc->enqueue(skb, qdisc);
+		qdisc_enqueue_root(skb, qdisc);
 		spin_unlock(root_lock);
 	}
 
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
@@ -429,7 +429,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #endif
 	}
 
-	ret = flow->q->enqueue(skb, flow->q);
+	ret = qdisc_enqueue(skb, flow->q);
 	if (ret != 0) {
 drop: __maybe_unused
 		sch->qstats.drops++;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
@@ -387,7 +387,8 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 	cl->q->__parent = sch;
 #endif
-	if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
+	ret = qdisc_enqueue(skb, cl->q);
+	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->bstats.packets++;
 		sch->bstats.bytes+=len;
@@ -671,7 +672,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 		q->rx_class = cl;
 		cl->q->__parent = sch;
 
-		if (cl->q->enqueue(skb, cl->q) == 0) {
+		if (qdisc_enqueue(skb, cl->q) == 0) {
 			sch->q.qlen++;
 			sch->bstats.packets++;
 			sch->bstats.bytes+=len;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
@@ -252,7 +252,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 	}
 
-	err = p->q->enqueue(skb, p->q);
+	err = qdisc_enqueue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
 		sch->qstats.drops++;
 		return err;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
@@ -1586,7 +1586,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	len = skb->len;
-	err = cl->qdisc->enqueue(skb, cl->qdisc);
+	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		cl->qstats.drops++;
 		sch->qstats.drops++;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
@@ -572,8 +572,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
-		   NET_XMIT_SUCCESS) {
+	} else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
 		sch->qstats.drops++;
 		cl->qstats.drops++;
 		return NET_XMIT_DROP;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
@@ -82,6 +82,12 @@ struct netem_skb_cb {
 	psched_time_t	time_to_send;
 };
 
+static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct netem_skb_cb));
+	return (struct netem_skb_cb *)skb->cb;
+}
+
 /* init_crandom - initialize correlated random number generator
  * Use entropy source for initial seed.
  */
@@ -184,7 +190,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 		q->duplicate = 0;
 
-		rootq->enqueue(skb2, rootq);
+		qdisc_enqueue_root(skb2, rootq);
 		q->duplicate = dupsave;
 	}
 
@@ -205,7 +211,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
 
-	cb = (struct netem_skb_cb *)skb->cb;
+	cb = netem_skb_cb(skb);
 	if (q->gap == 0 		/* not doing reordering */
 	    || q->counter < q->gap 	/* inside last reordering gap */
 	    || q->reorder < get_crandom(&q->reorder_cor)) {
@@ -218,7 +224,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		now = psched_get_time();
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = q->qdisc->enqueue(skb, q->qdisc);
+		ret = qdisc_enqueue(skb, q->qdisc);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -277,8 +283,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 
 	skb = q->qdisc->dequeue(q->qdisc);
 	if (skb) {
-		const struct netem_skb_cb *cb
-			= (const struct netem_skb_cb *)skb->cb;
+		const struct netem_skb_cb *cb = netem_skb_cb(skb);
 		psched_time_t now = psched_get_time();
 
 		/* if more time remaining? */
@@ -457,7 +462,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct fifo_sched_data *q = qdisc_priv(sch);
 	struct sk_buff_head *list = &sch->q;
-	psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send;
+	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
 	struct sk_buff *skb;
 
 	if (likely(skb_queue_len(list) < q->limit)) {
@@ -468,8 +473,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 		}
 
 		skb_queue_reverse_walk(list, skb) {
-			const struct netem_skb_cb *cb
-				= (const struct netem_skb_cb *)skb->cb;
+			const struct netem_skb_cb *cb = netem_skb_cb(skb);
 
 			if (tnext >= cb->time_to_send)
 				break;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
@@ -81,7 +81,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 #endif
 
-	if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) {
+	ret = qdisc_enqueue(skb, qdisc);
+	if (ret == NET_XMIT_SUCCESS) {
 		sch->bstats.bytes += skb->len;
 		sch->bstats.packets++;
 		sch->q.qlen++;
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
@@ -92,7 +92,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 			break;
 	}
 
-	ret = child->enqueue(skb, child);
+	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->bstats.bytes += skb->len;
 		sch->bstats.packets++;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
@@ -133,7 +133,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		return NET_XMIT_DROP;
 	}
 
-	if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
+	ret = qdisc_enqueue(skb, q->qdisc);
+	if (ret != 0) {
 		sch->qstats.drops++;
 		return ret;
 	}
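Apart from the wrapper conversion, the sch_netem.c part of the patch also replaces open-coded casts of skb->cb with a typed netem_skb_cb() accessor guarded by a compile-time size check. The same pattern could be reused by any qdisc that keeps private per-packet state in the cb area; a minimal sketch with a hypothetical example_skb_cb structure (names are illustrative, not part of this patch):

/* Hypothetical example mirroring the netem_skb_cb() helper added above;
 * 'example_skb_cb' is an illustrative name, not part of this patch.
 * Assumes <linux/skbuff.h> and <linux/bug.h> for sk_buff and BUILD_BUG_ON. */
struct example_skb_cb {
	u64	time_to_send;	/* whatever per-packet state the qdisc needs */
};

static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
{
	/* fail the build if the private state outgrows skb->cb */
	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct example_skb_cb));
	return (struct example_skb_cb *)skb->cb;
}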