/*	$NetBSD: pktqueue.c,v 1.2 2014/06/09 12:57:04 rmind Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pktqueue.c,v 1.2 2014/06/09 12:57:04 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/percpu.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_private.h>

#include <net/pktqueue.h>

/*
 * WARNING: update this if struct pktqueue changes.
 */
#define	PKTQ_CLPAD	\
    MAX(COHERENCY_UNIT, COHERENCY_UNIT - sizeof(kmutex_t) - sizeof(u_int))

struct pktqueue {
	/*
	 * The lock used for a barrier mechanism.  The barrier counter
	 * and the drop counter, however, are managed atomically.
	 * Ensure this group is in a separate cache line.
	 */
	kmutex_t	pq_lock;
	volatile u_int	pq_barrier;
	uint8_t		_pad[PKTQ_CLPAD];

	/* The size of the queue, counters and the interrupt handler. */
	u_int		pq_maxlen;
	percpu_t *	pq_counters;
	void *		pq_sih;

	/* Finally, per-CPU queues. */
	pcq_t *		pq_queue[];
};

/* The counters of the packet queue. */
#define	PQCNT_ENQUEUE	0
#define	PQCNT_DEQUEUE	1
#define	PQCNT_DROP	2
#define	PQCNT_NCOUNTERS	3

typedef struct {
	uint64_t	count[PQCNT_NCOUNTERS];
} pktq_counters_t;

/* Special marker value used by the pktq_barrier() mechanism. */
#define	PKTQ_MARKER	((void *)(~0ULL))

/*
 * The total size of pktqueue_t, which depends on the number of CPUs.
 */
#define	PKTQUEUE_STRUCT_LEN(ncpu)	\
    roundup2(offsetof(pktqueue_t, pq_queue[ncpu]), coherency_unit)

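/*
 * Illustrative arithmetic only (the parameter values are assumptions):
 * with coherency_unit = 64 and LP64 pointers, ncpu = 4 adds
 * 4 * 8 = 32 bytes of pq_queue[] slots after the fixed members, and
 * roundup2() then pads the total to the next 64-byte boundary, so the
 * trailing cache line is not shared with another object.
 */
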
pktqueue_t *
pktq_create(size_t maxlen, void (*intrh)(void *))
{
	const u_int sflags = SOFTINT_NET | SOFTINT_MPSAFE | SOFTINT_RCPU;
	const size_t len = PKTQUEUE_STRUCT_LEN(ncpu);
	pktqueue_t *pq;
	percpu_t *pc;
	void *sih;

	if ((pc = percpu_alloc(sizeof(pktq_counters_t))) == NULL) {
		return NULL;
	}
	if ((sih = softint_establish(sflags, intrh, NULL)) == NULL) {
		percpu_free(pc, sizeof(pktq_counters_t));
		return NULL;
	}

	pq = kmem_zalloc(len, KM_SLEEP);
	for (u_int i = 0; i < ncpu; i++) {
		pq->pq_queue[i] = pcq_create(maxlen, KM_SLEEP);
	}
	mutex_init(&pq->pq_lock, MUTEX_DEFAULT, IPL_NONE);
	pq->pq_maxlen = maxlen;
	pq->pq_counters = pc;
	pq->pq_sih = sih;

	return pq;
}
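
/*
 * Example usage, as an illustrative sketch only: the queue name
 * "ip_pktq", the handler "ipintr" and the IFQ_MAXLEN limit are
 * assumptions standing in for whatever the caller provides.
 *
 *	static pktqueue_t *ip_pktq;
 *
 *	void
 *	ip_init(void)
 *	{
 *		ip_pktq = pktq_create(IFQ_MAXLEN, ipintr);
 *		if (ip_pktq == NULL) {
 *			panic("ip_init: pktq_create failed");
 *		}
 *	}
 */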

void
pktq_destroy(pktqueue_t *pq)
{
	const size_t len = PKTQUEUE_STRUCT_LEN(ncpu);

	for (u_int i = 0; i < ncpu; i++) {
		pcq_t *q = pq->pq_queue[i];
		KASSERT(pcq_peek(q) == NULL);
		pcq_destroy(q);
	}
	percpu_free(pq->pq_counters, sizeof(pktq_counters_t));
	softint_disestablish(pq->pq_sih);
	mutex_destroy(&pq->pq_lock);
	kmem_free(pq, len);
}

/*
 * - pktq_inc_count: increment the counter given an ID.
 * - pktq_collect_counts: handler to sum up the counts from each CPU.
 * - pktq_get_count: return the effective count given an ID.
 */

static inline void
pktq_inc_count(pktqueue_t *pq, u_int i)
{
	percpu_t *pc = pq->pq_counters;
	pktq_counters_t *c;

	c = percpu_getref(pc);
	c->count[i]++;
	percpu_putref(pc);
}

static void
pktq_collect_counts(void *mem, void *arg, struct cpu_info *ci)
{
	const pktq_counters_t *c = mem;
	pktq_counters_t *sum = arg;

	for (u_int i = 0; i < PQCNT_NCOUNTERS; i++) {
		sum->count[i] += c->count[i];
	}
}

uint64_t
pktq_get_count(pktqueue_t *pq, pktq_count_t c)
{
	pktq_counters_t sum;

	if (c != PKTQ_MAXLEN) {
		memset(&sum, 0, sizeof(sum));
		percpu_foreach(pq->pq_counters, pktq_collect_counts, &sum);
	}
	switch (c) {
	case PKTQ_NITEMS:
		return sum.count[PQCNT_ENQUEUE] - sum.count[PQCNT_DEQUEUE];
	case PKTQ_DROPS:
		return sum.count[PQCNT_DROP];
	case PKTQ_MAXLEN:
		return pq->pq_maxlen;
	}
	return 0;
}
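
/*
 * For instance, an event-counter or sysctl report could fetch the
 * statistics as follows (an illustrative sketch; "pq" is assumed to
 * be a queue created earlier with pktq_create()):
 *
 *	uint64_t nitems = pktq_get_count(pq, PKTQ_NITEMS);
 *	uint64_t drops  = pktq_get_count(pq, PKTQ_DROPS);
 *	uint64_t maxlen = pktq_get_count(pq, PKTQ_MAXLEN);
 */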

uint32_t
pktq_rps_hash(const struct mbuf *m __unused)
{
	/*
	 * XXX: No distribution yet; the softnet_lock contention
	 * XXX: must be eliminated first.
	 */
	return 0;
}

/*
 * pktq_enqueue: inject the packet into the end of the queue.
 *
 * => Must be called from interrupt context or with preemption disabled.
 * => Consumes the packet and returns true on success.
 * => Returns false on failure; the caller is responsible for freeing
 *    the packet.
 */
bool
pktq_enqueue(pktqueue_t *pq, struct mbuf *m, const u_int hash)
{
	const unsigned cpuid = hash % ncpu;

	KASSERT(kpreempt_disabled());

	if (__predict_false(!pcq_put(pq->pq_queue[cpuid], m))) {
		pktq_inc_count(pq, PQCNT_DROP);
		return false;
	}
	softint_schedule_cpu(pq->pq_sih, cpu_lookup(cpuid));
	pktq_inc_count(pq, PQCNT_ENQUEUE);
	return true;
}
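
/*
 * A typical enqueue path might look as follows (an illustrative
 * sketch; "pq" and the mbuf "m" come from the caller):
 *
 *	const uint32_t hash = pktq_rps_hash(m);
 *
 *	kpreempt_disable();
 *	if (!pktq_enqueue(pq, m, hash)) {
 *		// Queue full: the drop was counted by pktq_enqueue(),
 *		// but freeing the packet is up to the caller.
 *		m_freem(m);
 *	}
 *	kpreempt_enable();
 */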

/*
 * pktq_dequeue: take a packet from the queue.
 *
 * => Must be called with preemption disabled.
 * => Must ensure there are no concurrent dequeue calls.
 */
struct mbuf *
pktq_dequeue(pktqueue_t *pq)
{
	const struct cpu_info *ci = curcpu();
	const unsigned cpuid = cpu_index(ci);
	struct mbuf *m;

	m = pcq_get(pq->pq_queue[cpuid]);
	if (__predict_false(m == PKTQ_MARKER)) {
		/* A barrier marker entry: acknowledge it. */
		atomic_inc_uint(&pq->pq_barrier);
		return NULL;
	}
	if (__predict_true(m != NULL)) {
		pktq_inc_count(pq, PQCNT_DEQUEUE);
	}
	return m;
}
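
/*
 * The softint handler established via pktq_create() would typically
 * drain the current CPU's queue in a loop.  An illustrative sketch,
 * assuming an IP input path with a queue named "ip_pktq":
 *
 *	static void
 *	ipintr(void *arg)
 *	{
 *		struct mbuf *m;
 *
 *		while ((m = pktq_dequeue(ip_pktq)) != NULL) {
 *			ip_input(m);
 *		}
 *	}
 */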

/*
 * pktq_barrier: wait for a grace period after which all packets
 * enqueued at the moment of calling this routine are guaranteed to
 * have been processed.  This is used to ensure that e.g. packets
 * referencing some interface have been drained.
 */
void
pktq_barrier(pktqueue_t *pq)
{
	u_int pending = 0;

	mutex_enter(&pq->pq_lock);
	KASSERT(pq->pq_barrier == 0);

	for (u_int i = 0; i < ncpu; i++) {
		pcq_t *q = pq->pq_queue[i];

		/* If the queue is empty - nothing to do. */
		if (pcq_peek(q) == NULL) {
			continue;
		}
		/* Otherwise, insert the marker entry. */
		while (!pcq_put(q, PKTQ_MARKER)) {
			kpause("pktqsync", false, 1, NULL);
		}
		kpreempt_disable();
		softint_schedule_cpu(pq->pq_sih, cpu_lookup(i));
		kpreempt_enable();
		pending++;
	}

	/* Wait for each queue to process the markers. */
	while (pq->pq_barrier != pending) {
		kpause("pktqsync", false, 1, NULL);
	}
	pq->pq_barrier = 0;
	mutex_exit(&pq->pq_lock);
}
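
/*
 * For example, code detaching a network interface could use the
 * barrier to make sure no queued packets still reference it.  An
 * illustrative sketch (the surrounding detach steps are assumptions):
 *
 *	// 1. Ensure no new packets from this interface get enqueued.
 *	// 2. Wait out the packets that are already queued:
 *	pktq_barrier(ip_pktq);
 *	// 3. It is now safe to release the interface resources.
 */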

/*
 * pktq_flush: free mbufs in all queues.
 *
 * => The caller must ensure there are no concurrent writers or
 *    flush calls.
 */
void
pktq_flush(pktqueue_t *pq)
{
	struct mbuf *m;

	for (u_int i = 0; i < ncpu; i++) {
		while ((m = pcq_get(pq->pq_queue[i])) != NULL) {
			pktq_inc_count(pq, PQCNT_DEQUEUE);
			m_freem(m);
		}
	}
}

/*
 * pktq_set_maxlen: create per-CPU queues using a new size and replace
 * the existing queues without losing any packets.
 */
int
pktq_set_maxlen(pktqueue_t *pq, size_t maxlen)
{
	const u_int slotbytes = ncpu * sizeof(pcq_t *);
	pcq_t **qs;

	if (!maxlen || maxlen > PCQ_MAXLEN)
		return EINVAL;
	if (pq->pq_maxlen == maxlen)
		return 0;

	/* First, allocate the new queues and replace them. */
	qs = kmem_zalloc(slotbytes, KM_SLEEP);
	for (u_int i = 0; i < ncpu; i++) {
		qs[i] = pcq_create(maxlen, KM_SLEEP);
	}
	mutex_enter(&pq->pq_lock);
	for (u_int i = 0; i < ncpu; i++) {
		/* Swap: store of a word is atomic. */
		pcq_t *q = pq->pq_queue[i];
		pq->pq_queue[i] = qs[i];
		qs[i] = q;
	}
	pq->pq_maxlen = maxlen;
	mutex_exit(&pq->pq_lock);

	/*
	 * At this point, the new packets are flowing into the new
	 * queues.  However, the old queues may have some packets
	 * present which are no longer being processed.  We are going
	 * to re-enqueue them.  This may change the order of packet
	 * arrival, but it is not considered an issue.
	 *
	 * There may also be in-flight interrupts calling pktq_dequeue()
	 * which reference the old queues.  Issue a barrier to ensure
	 * that we are going to be the only pcq_get() callers on the
	 * old queues.
	 */
	pktq_barrier(pq);

	for (u_int i = 0; i < ncpu; i++) {
		struct mbuf *m;

		while ((m = pcq_get(qs[i])) != NULL) {
			while (!pcq_put(pq->pq_queue[i], m)) {
				kpause("pktqrenq", false, 1, NULL);
			}
		}
		pcq_destroy(qs[i]);
	}

	/* Well, that was fun. */
	kmem_free(qs, slotbytes);
	return 0;
}
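
/*
 * A resize would typically be driven from a sysctl handler.  An
 * illustrative sketch only; the sysctl plumbing and the "ip_pktq"
 * queue are assumptions, not part of this file:
 *
 *	int error, maxlen;
 *
 *	maxlen = (int)pktq_get_count(ip_pktq, PKTQ_MAXLEN);
 *	// ...sysctl_lookup() may overwrite maxlen from userland...
 *	error = pktq_set_maxlen(ip_pktq, maxlen);
 *	if (error) {
 *		return error;
 *	}
 */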