      1 /*	$NetBSD: quota.c,v 1.11 2025/05/21 14:48:05 christos Exp $	*/
      2 
      3 /*
      4  * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
      5  *
      6  * SPDX-License-Identifier: MPL-2.0
      7  *
      8  * This Source Code Form is subject to the terms of the Mozilla Public
      9  * License, v. 2.0. If a copy of the MPL was not distributed with this
     10  * file, you can obtain one at https://mozilla.org/MPL/2.0/.
     11  *
     12  * See the COPYRIGHT file distributed with this work for additional
     13  * information regarding copyright ownership.
     14  */
     15 
     16 /*! \file */
     17 
     18 #include <stddef.h>
     19 
     20 #include <isc/atomic.h>
     21 #include <isc/quota.h>
     22 #include <isc/urcu.h>
     23 #include <isc/util.h>
     24 
     25 #define QUOTA_MAGIC    ISC_MAGIC('Q', 'U', 'O', 'T')
     26 #define VALID_QUOTA(p) ISC_MAGIC_VALID(p, QUOTA_MAGIC)
     27 
     28 void
     29 isc_quota_init(isc_quota_t *quota, unsigned int max) {
     30 	atomic_init(&quota->max, max);
     31 	atomic_init(&quota->used, 0);
     32 	atomic_init(&quota->soft, 0);
     33 	cds_wfcq_init(&quota->jobs.head, &quota->jobs.tail);
     34 	ISC_LINK_INIT(quota, link);
     35 	quota->magic = QUOTA_MAGIC;
     36 }
     37 
     38 void
     39 isc_quota_soft(isc_quota_t *quota, unsigned int soft) {
     40 	REQUIRE(VALID_QUOTA(quota));
     41 	atomic_store_relaxed(&quota->soft, soft);
     42 }
     43 
     44 void
     45 isc_quota_max(isc_quota_t *quota, unsigned int max) {
     46 	REQUIRE(VALID_QUOTA(quota));
     47 	atomic_store_relaxed(&quota->max, max);
     48 }
     49 
     50 unsigned int
     51 isc_quota_getmax(isc_quota_t *quota) {
     52 	REQUIRE(VALID_QUOTA(quota));
     53 	return atomic_load_relaxed(&quota->max);
     54 }
     55 
     56 unsigned int
     57 isc_quota_getsoft(isc_quota_t *quota) {
     58 	REQUIRE(VALID_QUOTA(quota));
     59 	return atomic_load_relaxed(&quota->soft);
     60 }
     61 
     62 unsigned int
     63 isc_quota_getused(isc_quota_t *quota) {
     64 	REQUIRE(VALID_QUOTA(quota));
     65 	return atomic_load_acquire(&quota->used);
     66 }
     67 
/*
 * Release one unit of quota.
 *
 * If a callback is waiting in the queue, the released unit is handed
 * directly to it: 'used' is left unchanged and the callback runs
 * synchronously on the calling thread.  Only when the queue is empty
 * is 'used' actually decremented.
 */
void
isc_quota_release(isc_quota_t *quota) {
	struct cds_wfcq_node *node;
	/*
	 * We are using the cds_wfcq_dequeue_blocking() variant here that
	 * has an internal mutex because we need synchronization on
	 * multiple dequeues running from different threads.
	 */
again:
	node = cds_wfcq_dequeue_blocking(&quota->jobs.head, &quota->jobs.tail);
	if (node == NULL) {
		/* No waiter: give the unit back. */
		uint_fast32_t used = atomic_fetch_sub_acq_rel(&quota->used, 1);
		INSIST(used > 0);

		/*
		 * If this was the last quota released and in the meantime a
		 * new job has appeared in the queue, then give it a chance
		 * to run, otherwise it could get stuck there until a new quota
		 * is acquired and released again.
		 */
		if (used == 1 &&
		    !cds_wfcq_empty(&quota->jobs.head, &quota->jobs.tail))
		{
			/* Re-acquire a unit and retry the dequeue. */
			atomic_fetch_add_acq_rel(&quota->used, 1);
			goto again;
		}

		return;
	}

	/* Hand the unit to the dequeued waiter and run its callback now. */
	isc_job_t *job = caa_container_of(node, isc_job_t, wfcq_node);
	job->cb(job->cbarg);
}
    101 
/*
 * Try to acquire one unit of quota.
 *
 * Returns:
 * - ISC_R_SUCCESS: a unit was acquired and usage is below the soft
 *   limit.
 * - ISC_R_SOFTQUOTA: a unit was still acquired, but usage reached the
 *   soft limit; the caller may want to shed load.
 * - ISC_R_QUOTA: the hard limit was reached and no unit was acquired.
 *   If 'job' is non-NULL, 'cb(cbarg)' is enqueued and will run later,
 *   from inside isc_quota_release(), when a unit becomes available.
 *
 * 'quota->max' == 0 means unlimited; 'quota->soft' == 0 disables the
 * soft-quota result.
 */
isc_result_t
isc_quota_acquire_cb(isc_quota_t *quota, isc_job_t *job, isc_job_cb cb,
		     void *cbarg) {
	REQUIRE(VALID_QUOTA(quota));
	REQUIRE(job == NULL || cb != NULL);

	/*
	 * Optimistically take a unit, then undo the increment if that
	 * pushed usage past the hard limit.
	 */
	uint_fast32_t used = atomic_fetch_add_acq_rel(&quota->used, 1);
	uint_fast32_t max = atomic_load_relaxed(&quota->max);
	if (max != 0 && used >= max) {
		(void)atomic_fetch_sub_acq_rel(&quota->used, 1);
		if (job != NULL) {
			job->cb = cb;
			job->cbarg = cbarg;
			cds_wfcq_node_init(&job->wfcq_node);

			/*
			 * The cds_wfcq_enqueue() is non-blocking (no internal
			 * mutex involved), so it offers a slight advantage.
			 */
			cds_wfcq_enqueue(&quota->jobs.head, &quota->jobs.tail,
					 &job->wfcq_node);

			/*
			 * While we were initializing and enqueuing a new node,
			 * quotas might have been released, and if no quota is
			 * used any more, then our newly enqueued job won't
			 * have a chance to get running until a new quota is
			 * acquired and released. To avoid a hangup, check
			 * quota->used again, if it's 0 then simulate a quota
			 * acquire/release for the current job to run as soon as
			 * possible, although we will still return ISC_R_QUOTA
			 * to the caller.
			 */
			if (atomic_compare_exchange_strong_acq_rel(
				    &quota->used, &(uint_fast32_t){ 0 }, 1))
			{
				isc_quota_release(quota);
			}
		}
		return ISC_R_QUOTA;
	}

	/* Unit acquired; report soft-quota pressure if the limit is set. */
	uint_fast32_t soft = atomic_load_relaxed(&quota->soft);
	if (soft != 0 && used >= soft) {
		return ISC_R_SOFTQUOTA;
	}

	return ISC_R_SUCCESS;
}
    151 
    152 void
    153 isc_quota_destroy(isc_quota_t *quota) {
    154 	REQUIRE(VALID_QUOTA(quota));
    155 	quota->magic = 0;
    156 
    157 	INSIST(atomic_load_acquire(&quota->used) == 0);
    158 	INSIST(cds_wfcq_empty(&quota->jobs.head, &quota->jobs.tail));
    159 
    160 	cds_wfcq_destroy(&quota->jobs.head, &quota->jobs.tail);
    161 }
    162