CNDP  22.08.0
cthread_pool.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2019-2022 Intel Corporation
 */

/*
 * Some portions of this software are derived from the producer
 * consumer queues described by Dmitry Vyukov and published here
 * http://www.1024cores.net
 *
 * Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of Dmitry Vyukov.
 */

#ifndef _CTHREAD_POOL_H_
#define _CTHREAD_POOL_H_

#include <cne_per_thread.h>
#include <cne_log.h>

#include "cthread_int.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * This file implements a pool of queue nodes used by the queue implemented
 * in cthread_queue.h.
 *
 * The pool is an intrusive lock-free MPSC queue.
 *
 * The pool is created empty and populated lazily, i.e. on the first
 * attempt to allocate from the pool.
 *
 * Whenever the pool is empty more nodes are added to it.
 * The number of nodes preallocated in this way is a parameter of
 * _qnode_pool_create. Freeing an object returns it to the pool.
 *
 * Each cthread scheduler maintains its own pool of nodes. Cthreads must
 * always allocate from this local pool (because it is a single-consumer
 * queue). Cthreads can free nodes to any pool (because it is a
 * multi-producer queue). This enables threads that have been affined to a
 * different scheduler to free nodes safely, as illustrated in the sketch
 * below.
 */

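/*
 * Illustrative sketch (not part of the original header), assuming two
 * schedulers A and B, of the allocate-locally / free-anywhere rule above:
 *
 *   on scheduler A:  n = _qnode_alloc();   // pops from A's own pool only;
 *                                          // the pool is single consumer
 *   ... the node travels to a cthread running on scheduler B ...
 *   on scheduler B:  _qnode_free(n);       // safe: n->pool still points at
 *                                          // A's pool, and inserts are
 *                                          // multi producer
 */
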
struct qnode;
struct qnode_cache;

/*
 * define intermediate node
 */
struct qnode {
    struct qnode *next;
    void *data;
    struct qnode_pool *pool;
} __cne_cache_aligned;

/*
 * a pool structure
 */
struct qnode_pool {
    struct qnode *head;
    struct qnode *stub;
    struct qnode *fast_alloc;
    struct qnode *tail __cne_cache_aligned;
    int pre_alloc;
    char name[CTHREAD_NAME_SIZE];
} __cne_cache_aligned;

/*
 * Create a pool of qnodes; the pool is created empty and populated
 * lazily on first allocation.
 */
static inline struct qnode_pool *
_qnode_pool_create(const char *name, int prealloc_size)
{
    struct qnode_pool *p = calloc(1, sizeof(struct qnode_pool));

    CNE_ASSERT(p);
    if (p == NULL)
        return NULL;

    p->stub = calloc(1, sizeof(struct qnode));

    CNE_ASSERT(p->stub);
    if (p->stub == NULL) {
        free(p);
        return NULL;
    }

    if (name != NULL)
        strlcpy(p->name, name, sizeof(p->name));
    p->name[sizeof(p->name) - 1] = 0;

    p->stub->pool = p;
    p->stub->next = NULL;
    p->tail = p->stub;
    p->head = p->stub;
    p->pre_alloc = prealloc_size;

    return p;
}

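/*
 * Example (illustrative, not from the original header): creating a pool;
 * the name and preallocation count shown are arbitrary.
 *
 *   struct qnode_pool *pool = _qnode_pool_create("sched-pool", 64);
 *
 *   if (pool == NULL)
 *       return NULL;   // allocation failed
 *
 * No nodes are allocated here; the first _qnode_alloc() that finds the
 * pool empty preallocates 64 nodes.
 */
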
/*
 * Insert a node into the pool; this is the lock-free multi-producer push.
 */
static inline void __attribute__((always_inline))
_qnode_pool_insert(struct qnode_pool *p, struct qnode *n)
{
    n->next = NULL;
    struct qnode *prev = n;

    /* We insert at the head */
    prev = (struct qnode *)__sync_lock_test_and_set((uint64_t *)&p->head, (uint64_t)prev);

    /* there is a window of inconsistency until prev->next is set,
     * which is why remove must retry
     */
    prev->next = n;
}

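/*
 * The push above is the producer half of Vyukov's intrusive MPSC queue:
 * one atomic exchange publishes the new head, then the previous head is
 * linked to it. A roughly equivalent C11 formulation (a sketch, assuming
 * the fields were declared _Atomic) would be:
 *
 *   prev = atomic_exchange_explicit(&p->head, n, memory_order_acq_rel);
 *   atomic_store_explicit(&prev->next, n, memory_order_release);
 *
 * GCC documents __sync_lock_test_and_set() as an atomic exchange with
 * acquire semantics; on x86 it compiles to a locked exchange.
 */
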
/*
 * Remove a node from the pool; this is the single-consumer pop, so it
 * must only be called from the scheduler that owns the pool.
 */
static inline struct qnode *__attribute__((always_inline))
_pool_remove(struct qnode_pool *p)
{
    struct qnode *head;
    struct qnode *tail = p->tail;
    struct qnode *next = tail->next;

    /* we remove from the tail */
    if (tail == p->stub) {
        if (next == NULL)
            return NULL;
        /* advance the tail */
        p->tail = next;
        tail = next;
        next = next->next;
    }
    if (likely(next != NULL)) {
        p->tail = next;
        return tail;
    }

    head = p->head;
    if (tail == head)
        return NULL;

    /* swing stub node */
    _qnode_pool_insert(p, p->stub);

    next = tail->next;
    if (next) {
        p->tail = next;
        return tail;
    }
    return NULL;
}

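/*
 * Note: _pool_remove() can return NULL even when the pool is not empty,
 * because a producer may be mid-insert (between its atomic exchange of
 * p->head and its store to prev->next). The wrapper below retries while
 * head and tail indicate that nodes remain.
 */
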
/*
 * Remove a node from the pool, retrying while a concurrent insert is in
 * flight.
 */
static inline struct qnode *__attribute__((always_inline))
_qnode_pool_remove(struct qnode_pool *p)
{
    struct qnode *n;

    do {
        n = _pool_remove(p);
        if (likely(n != NULL))
            return n;

        cne_compiler_barrier();
    } while ((p->head != p->tail) && (p->tail != p->stub));
    return NULL;
}

/*
 * Allocate a node from the pool
 * If the pool is empty add more nodes
 */
static inline struct qnode *__attribute__((always_inline))
_qnode_alloc(void)
{
    struct qnode_pool *p = (THIS_SCHED)->qnode_pool;
    int prealloc_size = p->pre_alloc;
    struct qnode *n;
    int i;

    /* fast path: a single cached node, private to this scheduler */
    if (likely(p->fast_alloc != NULL)) {
        n = p->fast_alloc;
        p->fast_alloc = NULL;
        return n;
    }

    n = _qnode_pool_remove(p);

    if (unlikely(n == NULL)) {
        /* pool is empty: preallocate a batch of nodes, then retry */
        for (i = 0; i < prealloc_size; i++) {
            n = calloc(1, sizeof(struct qnode));
            if (n == NULL)
                return NULL;

            n->pool = p;
            _qnode_pool_insert(p, n);
        }
        n = _qnode_pool_remove(p);
    }

    return n;
}

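/*
 * Example (illustrative, not from the original header): a node allocated
 * on the current scheduler carries an arbitrary payload and is later
 * returned to the pool recorded in n->pool, whichever thread frees it.
 *
 *   struct qnode *n = _qnode_alloc();
 *
 *   if (n != NULL) {
 *       n->data = payload;   // 'payload' is a hypothetical void pointer
 *       ...
 *       _qnode_free(n);
 *   }
 */
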
/*
 * free a queue node to the per scheduler pool from which it came
 */
static inline void __attribute__((always_inline))
_qnode_free(struct qnode *n)
{
    struct qnode_pool *p = n->pool;

    /* if the fast-alloc slot is occupied, or the node belongs to another
     * scheduler's pool, push it back onto its owning pool
     */
    if (unlikely(p->fast_alloc != NULL) || unlikely(n->pool != (THIS_SCHED)->qnode_pool)) {
        _qnode_pool_insert(p, n);
        return;
    }
    p->fast_alloc = n;
}

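/*
 * Design note: fast_alloc is a single-entry cache that lets the common
 * allocate-then-free cycle on one scheduler bypass the MPSC queue
 * entirely. Only the owning scheduler writes the slot (a free from any
 * other thread takes the _qnode_pool_insert() path above), so no atomic
 * operation is needed.
 */
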
/*
 * Destroy a qnode pool
 * queue must be empty when this is called
 */
static inline int
_qnode_pool_destroy(struct qnode_pool *p)
{
    free(p->stub);
    free(p);
    return 0;
}

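/*
 * Note that destroy frees only the stub and the pool structure itself;
 * nodes still sitting in the pool are not walked and freed, which is
 * presumably why the queue must be empty. An illustrative teardown (not
 * from the original header):
 *
 *   struct qnode_pool *p = _qnode_pool_create("tmp-pool", 32);
 *   ...
 *   _qnode_pool_destroy(p);
 */
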
#ifdef __cplusplus
}
#endif

#endif /* _CTHREAD_POOL_H_ */