/*	$NetBSD: pktqueue.c,v 1.18 2022/09/01 05:04:22 thorpej Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The packet queue (pktqueue) interface is a lockless IP input queue
 * which also abstracts and handles network ISR scheduling.  It provides
 * a mechanism to enable receiver-side packet steering (RPS).
 */
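
/*
 * Typical life cycle, as an illustrative sketch only ("ipintr" stands
 * in for whatever softint handler the subsystem installs; IFQ_MAXLEN
 * is just an example size):
 *
 *	pktqueue_t *pq = pktq_create(IFQ_MAXLEN, ipintr, NULL);
 *	...
 *	pktq_destroy(pq);
 */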

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pktqueue.c,v 1.18 2022/09/01 05:04:22 thorpej Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/sysctl.h>
#include <sys/xcall.h>

#include <net/pktqueue.h>
#include <net/rss_config.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

struct pktqueue {
	/*
	 * The lock used for the barrier mechanism.  The barrier counter,
	 * as well as the drop counter, are nevertheless managed atomically.
	 * Ensure this group resides in a separate cache line.
	 */
	union {
		struct {
			kmutex_t	pq_lock;
			volatile u_int	pq_barrier;
		};
		uint8_t	 _pad[COHERENCY_UNIT];
	};

	/* The size of the queue, counters and the interrupt handler. */
	u_int		pq_maxlen;
	percpu_t *	pq_counters;
	void *		pq_sih;

	/* Finally, per-CPU queues. */
	struct percpu *	pq_pcq;	/* struct pcq * */
};

/* The counters of the packet queue. */
#define	PQCNT_ENQUEUE	0
#define	PQCNT_DEQUEUE	1
#define	PQCNT_DROP	2
#define	PQCNT_NCOUNTERS	3

typedef struct {
	uint64_t	count[PQCNT_NCOUNTERS];
} pktq_counters_t;

/* Special marker value used by pktq_barrier() mechanism. */
#define	PKTQ_MARKER	((void *)(~0ULL))

static void
pktq_init_cpu(void *vqp, void *vpq, struct cpu_info *ci)
{
	struct pcq **qp = vqp;
	struct pktqueue *pq = vpq;

	*qp = pcq_create(pq->pq_maxlen, KM_SLEEP);
}

static void
pktq_fini_cpu(void *vqp, void *vpq, struct cpu_info *ci)
{
	struct pcq **qp = vqp, *q = *qp;

	KASSERT(pcq_peek(q) == NULL);
	pcq_destroy(q);
	*qp = NULL;		/* paranoia */
}

static struct pcq *
pktq_pcq(struct pktqueue *pq, struct cpu_info *ci)
{
	struct pcq **qp, *q;

	/*
	 * As long as preemption is disabled, the xcall to swap percpu
	 * buffers can't complete, so it is safe to read the pointer.
	 */
	KASSERT(kpreempt_disabled());

	qp = percpu_getptr_remote(pq->pq_pcq, ci);
	q = *qp;

	return q;
}

pktqueue_t *
pktq_create(size_t maxlen, void (*intrh)(void *), void *sc)
{
	const u_int sflags = SOFTINT_NET | SOFTINT_MPSAFE | SOFTINT_RCPU;
	pktqueue_t *pq;
	percpu_t *pc;
	void *sih;

	pc = percpu_alloc(sizeof(pktq_counters_t));
	if ((sih = softint_establish(sflags, intrh, sc)) == NULL) {
		percpu_free(pc, sizeof(pktq_counters_t));
		return NULL;
	}

	pq = kmem_zalloc(sizeof(*pq), KM_SLEEP);
	mutex_init(&pq->pq_lock, MUTEX_DEFAULT, IPL_NONE);
	pq->pq_maxlen = maxlen;
	pq->pq_counters = pc;
	pq->pq_sih = sih;
	pq->pq_pcq = percpu_create(sizeof(struct pcq *),
	    pktq_init_cpu, pktq_fini_cpu, pq);

	return pq;
}

void
pktq_destroy(pktqueue_t *pq)
{

	percpu_free(pq->pq_pcq, sizeof(struct pcq *));
	percpu_free(pq->pq_counters, sizeof(pktq_counters_t));
	softint_disestablish(pq->pq_sih);
	mutex_destroy(&pq->pq_lock);
	kmem_free(pq, sizeof(*pq));
}

/*
 * - pktq_inc_count: increment the counter given an ID.
 * - pktq_collect_counts: handler to sum up the counts from each CPU.
 * - pktq_get_count: return the effective count given an ID.
 */

static inline void
pktq_inc_count(pktqueue_t *pq, u_int i)
{
	percpu_t *pc = pq->pq_counters;
	pktq_counters_t *c;

	c = percpu_getref(pc);
	c->count[i]++;
	percpu_putref(pc);
}

static void
pktq_collect_counts(void *mem, void *arg, struct cpu_info *ci)
{
	const pktq_counters_t *c = mem;
	pktq_counters_t *sum = arg;

	int s = splnet();

	for (u_int i = 0; i < PQCNT_NCOUNTERS; i++) {
		sum->count[i] += c->count[i];
	}

	splx(s);
}

uint64_t
pktq_get_count(pktqueue_t *pq, pktq_count_t c)
{
	pktq_counters_t sum;

	if (c != PKTQ_MAXLEN) {
		memset(&sum, 0, sizeof(sum));
		percpu_foreach_xcall(pq->pq_counters,
		    XC_HIGHPRI_IPL(IPL_SOFTNET), pktq_collect_counts, &sum);
	}
	switch (c) {
	case PKTQ_NITEMS:
		return sum.count[PQCNT_ENQUEUE] - sum.count[PQCNT_DEQUEUE];
	case PKTQ_DROPS:
		return sum.count[PQCNT_DROP];
	case PKTQ_MAXLEN:
		return pq->pq_maxlen;
	}
	return 0;
}
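
/*
 * For example, a monitoring or sysctl path could read the effective
 * counters like this (an illustrative sketch):
 *
 *	uint64_t nitems = pktq_get_count(pq, PKTQ_NITEMS);
 *	uint64_t drops = pktq_get_count(pq, PKTQ_DROPS);
 */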

uint32_t
pktq_rps_hash(const pktq_rps_hash_func_t *funcp, const struct mbuf *m)
{
	pktq_rps_hash_func_t func = atomic_load_relaxed(funcp);

	KASSERT(func != NULL);

	return (*func)(m);
}

static uint32_t
pktq_rps_hash_zero(const struct mbuf *m __unused)
{

	return 0;
}

static uint32_t
pktq_rps_hash_curcpu(const struct mbuf *m __unused)
{

	return cpu_index(curcpu());
}

static uint32_t
pktq_rps_hash_toeplitz(const struct mbuf *m)
{
	struct ip *ip;
	/*
	 * Do not hash on the UDP port: IP fragments are not currently
	 * handled, so including the port would yield a mix of 2-tuple
	 * and 4-tuple hashes for the same flow.
	 */
	const u_int flag = RSS_TOEPLITZ_USE_TCP_PORT;

	/* Peek at the IP version; ensure the header bytes are present. */
	if ((m->m_flags & M_PKTHDR) == 0)
		return 0;
	if (__predict_false(m->m_len < sizeof(struct ip)))
		return 0;

	ip = mtod(m, struct ip *);
	if (ip->ip_v == IPVERSION) {
		return rss_toeplitz_hash_from_mbuf_ipv4(m, flag);
	} else if (ip->ip_v == 6) {
		if (__predict_false(m->m_len < sizeof(struct ip6_hdr)))
			return 0;
		return rss_toeplitz_hash_from_mbuf_ipv6(m, flag);
	}

	return 0;
}

/*
 * Variant of the Toeplitz hash that never selects the current CPU.
 * This generally performs better than plain "toeplitz", since the CPU
 * taking the device interrupts is not also loaded with the protocol
 * processing.  For example, with ncpu = 4 and the current CPU at
 * index 1, the hash is first reduced modulo 3 to {0, 1, 2}; values
 * >= 1 are then shifted up by one, so the result ranges over {0, 2, 3}.
 */
static uint32_t
pktq_rps_hash_toeplitz_othercpus(const struct mbuf *m)
{
	uint32_t hash;

	if (ncpu == 1)
		return 0;

	hash = pktq_rps_hash_toeplitz(m);
	hash %= ncpu - 1;
	if (hash >= cpu_index(curcpu()))
		return hash + 1;
	else
		return hash;
}

static struct pktq_rps_hash_table {
	const char *	prh_type;
	pktq_rps_hash_func_t prh_func;
} const pktq_rps_hash_tab[] = {
	{ "zero", pktq_rps_hash_zero },
	{ "curcpu", pktq_rps_hash_curcpu },
	{ "toeplitz", pktq_rps_hash_toeplitz },
	{ "toeplitz-othercpus", pktq_rps_hash_toeplitz_othercpus },
};
const pktq_rps_hash_func_t pktq_rps_hash_default =
#ifdef NET_MPSAFE
	pktq_rps_hash_curcpu;
#else
	pktq_rps_hash_zero;
#endif

static const char *
pktq_get_rps_hash_type(pktq_rps_hash_func_t func)
{

	for (int i = 0; i < __arraycount(pktq_rps_hash_tab); i++) {
		if (func == pktq_rps_hash_tab[i].prh_func) {
			return pktq_rps_hash_tab[i].prh_type;
		}
	}

	return NULL;
}

static int
pktq_set_rps_hash_type(pktq_rps_hash_func_t *func, const char *type)
{

	if (strcmp(type, pktq_get_rps_hash_type(*func)) == 0)
		return 0;

	for (int i = 0; i < __arraycount(pktq_rps_hash_tab); i++) {
		if (strcmp(type, pktq_rps_hash_tab[i].prh_type) == 0) {
			atomic_store_relaxed(func, pktq_rps_hash_tab[i].prh_func);
			return 0;
		}
	}

	return ENOENT;
}

int
sysctl_pktq_rps_hash_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	pktq_rps_hash_func_t *func;
	int error;
	char type[PKTQ_RPS_HASH_NAME_LEN];

	node = *rnode;
	func = node.sysctl_data;

	strlcpy(type, pktq_get_rps_hash_type(*func), PKTQ_RPS_HASH_NAME_LEN);

	node.sysctl_data = &type;
	node.sysctl_size = sizeof(type);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	error = pktq_set_rps_hash_type(func, type);

	return error;
}
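
/*
 * A subsystem would typically attach the handler above to a read-write
 * CTLTYPE_STRING node whose sysctl_data points at its hash-function
 * pointer.  A sketch, assuming a hypothetical "ip_pktq_rps_hash"
 * variable and MIB location:
 *
 *	sysctl_createv(clog, 0, NULL, NULL,
 *	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
 *	    CTLTYPE_STRING, "rps_hash",
 *	    SYSCTL_DESCR("RPS hash function"),
 *	    sysctl_pktq_rps_hash_handler, 0, (void *)&ip_pktq_rps_hash,
 *	    PKTQ_RPS_HASH_NAME_LEN,
 *	    CTL_NET, PF_INET, IPPROTO_IP, CTL_CREATE, CTL_EOL);
 */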

/*
 * pktq_enqueue: inject the packet into the end of the queue.
 *
 * => Must be called from interrupt context or with preemption disabled.
 * => Consumes the packet and returns true on success.
 * => Returns false on failure; the caller is responsible for freeing
 *    the packet.
 */
bool
pktq_enqueue(pktqueue_t *pq, struct mbuf *m, const u_int hash __unused)
{
#if defined(_RUMPKERNEL) || defined(_RUMP_NATIVE_ABI)
	struct cpu_info *ci = curcpu();
#else
	struct cpu_info *ci = cpu_lookup(hash % ncpu);
#endif

	KASSERT(kpreempt_disabled());

	if (__predict_false(!pcq_put(pktq_pcq(pq, ci), m))) {
		pktq_inc_count(pq, PQCNT_DROP);
		return false;
	}
	softint_schedule_cpu(pq->pq_sih, ci);
	pktq_inc_count(pq, PQCNT_ENQUEUE);
	return true;
}
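
/*
 * An example caller, as an illustrative sketch ("pq" being the
 * subsystem's queue): steer by the RPS hash and free the mbuf
 * ourselves when the queue is full.
 *
 *	kpreempt_disable();
 *	hash = pktq_rps_hash(&pktq_rps_hash_default, m);
 *	if (__predict_false(!pktq_enqueue(pq, m, hash))) {
 *		m_freem(m);
 *	}
 *	kpreempt_enable();
 */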

/*
 * pktq_dequeue: take a packet from the queue.
 *
 * => Must be called with preemption disabled.
 * => The caller must ensure there are no concurrent dequeue calls.
 */
struct mbuf *
pktq_dequeue(pktqueue_t *pq)
{
	struct cpu_info *ci = curcpu();
	struct mbuf *m;

	KASSERT(kpreempt_disabled());

	m = pcq_get(pktq_pcq(pq, ci));
	if (__predict_false(m == PKTQ_MARKER)) {
		/* Note the marker entry. */
		atomic_inc_uint(&pq->pq_barrier);

		/* Get the next queue entry. */
		m = pcq_get(pktq_pcq(pq, ci));

		/*
		 * There can only be one barrier operation pending
		 * on a pktqueue at any given time, so we can assert
		 * that the next item is not a marker.
		 */
		KASSERT(m != PKTQ_MARKER);
	}
	if (__predict_true(m != NULL)) {
		pktq_inc_count(pq, PQCNT_DEQUEUE);
	}
	return m;
}
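
/*
 * The softint handler established through pktq_create() is expected to
 * drain the current CPU's queue.  A minimal sketch ("ip_pktq" and
 * "ip_input" stand in for the subsystem's own queue and input routine):
 *
 *	static void
 *	ipintr(void *arg)
 *	{
 *		struct mbuf *m;
 *
 *		while ((m = pktq_dequeue(ip_pktq)) != NULL) {
 *			ip_input(m);
 *		}
 *	}
 */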

/*
 * pktq_barrier: wait for a grace period during which all packets
 * enqueued at the moment of calling this routine will have been
 * processed.  This is used to ensure that e.g. packets referencing
 * some interface have been drained.
 */
void
pktq_barrier(pktqueue_t *pq)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int pending = 0;

	mutex_enter(&pq->pq_lock);
	KASSERT(pq->pq_barrier == 0);

	for (CPU_INFO_FOREACH(cii, ci)) {
		struct pcq *q;

		kpreempt_disable();
		q = pktq_pcq(pq, ci);
		kpreempt_enable();

		/* If the queue is empty, there is nothing to do. */
		if (pcq_peek(q) == NULL) {
			continue;
		}
		/* Otherwise, insert the marker entry. */
		while (!pcq_put(q, PKTQ_MARKER)) {
			kpause("pktqsync", false, 1, NULL);
		}
		kpreempt_disable();
		softint_schedule_cpu(pq->pq_sih, ci);
		kpreempt_enable();
		pending++;
	}

	/* Wait for each queue to process the markers. */
	while (pq->pq_barrier != pending) {
		kpause("pktqsync", false, 1, NULL);
	}
	pq->pq_barrier = 0;
	mutex_exit(&pq->pq_lock);
}
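
/*
 * For instance, before an interface is finally detached, a caller can
 * wait until the packets already queued that may reference it have
 * been processed (an illustrative sketch, "ip_pktq" being hypothetical):
 *
 *	pktq_barrier(ip_pktq);
 */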

/*
 * pktq_flush: free mbufs in all queues.
 *
 * => The caller must ensure there are no concurrent writers or flush calls.
 */
void
pktq_flush(pktqueue_t *pq)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct mbuf *m;

	for (CPU_INFO_FOREACH(cii, ci)) {
		struct pcq *q;

		kpreempt_disable();
		q = pktq_pcq(pq, ci);
		kpreempt_enable();

		/*
		 * XXX This can't be right -- if the softint is running
		 * then pcq_get isn't safe here.
		 */
		while ((m = pcq_get(q)) != NULL) {
			pktq_inc_count(pq, PQCNT_DEQUEUE);
			m_freem(m);
		}
	}
}
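
/*
 * A full shutdown path would first ensure that no new packets can be
 * enqueued and that the softint has quiesced, and only then flush and
 * destroy the queue.  A sketch under those assumptions:
 *
 *	pktq_barrier(pq);
 *	pktq_flush(pq);
 *	pktq_destroy(pq);
 */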

static void
pktq_set_maxlen_cpu(void *vpq, void *vqs)
{
	struct pktqueue *pq = vpq;
	struct pcq **qp, *q, **qs = vqs;
	unsigned i = cpu_index(curcpu());
	int s;

	s = splnet();
	qp = percpu_getref(pq->pq_pcq);
	q = *qp;
	*qp = qs[i];
	qs[i] = q;
	percpu_putref(pq->pq_pcq);
	splx(s);
}

/*
 * pktq_set_maxlen: create per-CPU queues using a new size and replace
 * the existing queues without losing any packets.
 *
 * XXX ncpu must remain stable throughout.
 */
int
pktq_set_maxlen(pktqueue_t *pq, size_t maxlen)
{
	const u_int slotbytes = ncpu * sizeof(pcq_t *);
	pcq_t **qs;

	if (!maxlen || maxlen > PCQ_MAXLEN)
		return EINVAL;
	if (pq->pq_maxlen == maxlen)
		return 0;

	/* First, allocate the new queues. */
	qs = kmem_zalloc(slotbytes, KM_SLEEP);
	for (u_int i = 0; i < ncpu; i++) {
		qs[i] = pcq_create(maxlen, KM_SLEEP);
	}

	/*
	 * Issue an xcall to replace the queue pointers on each CPU.
	 * This implies all the necessary memory barriers.
	 */
	mutex_enter(&pq->pq_lock);
	xc_wait(xc_broadcast(XC_HIGHPRI, pktq_set_maxlen_cpu, pq, qs));
	pq->pq_maxlen = maxlen;
	mutex_exit(&pq->pq_lock);

	/*
	 * At this point, the new packets are flowing into the new
	 * queues.  However, the old queues may have some packets
	 * present which are no longer being processed.  We are going
	 * to re-enqueue them.  This may change the order of packet
	 * arrival, but it is not considered an issue.
	 *
	 * There may be in-flight interrupts calling pktq_dequeue()
	 * which reference the old queues.  Issue a barrier to ensure
	 * that we are going to be the only pcq_get() callers on the
	 * old queues.
	 */
	pktq_barrier(pq);

	for (u_int i = 0; i < ncpu; i++) {
		struct pcq *q;
		struct mbuf *m;

		kpreempt_disable();
		q = pktq_pcq(pq, cpu_lookup(i));
		kpreempt_enable();

		while ((m = pcq_get(qs[i])) != NULL) {
			while (!pcq_put(q, m)) {
				kpause("pktqrenq", false, 1, NULL);
			}
		}
		pcq_destroy(qs[i]);
	}

	/* Well, that was fun. */
	kmem_free(qs, slotbytes);
	return 0;
}

int
sysctl_pktq_maxlen(SYSCTLFN_ARGS, pktqueue_t *pq)
{
	u_int nmaxlen = pktq_get_count(pq, PKTQ_MAXLEN);
	struct sysctlnode node = *rnode;
	int error;

	node.sysctl_data = &nmaxlen;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	return pktq_set_maxlen(pq, nmaxlen);
}

int
sysctl_pktq_count(SYSCTLFN_ARGS, pktqueue_t *pq, u_int count_id)
{
	uint64_t count = pktq_get_count(pq, count_id);
	struct sysctlnode node = *rnode;

	node.sysctl_data = &count;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}