CNDP 22.08.0
cne_spinlock.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2010-2022 Intel Corporation
 */

#ifndef _CNE_SPINLOCK_H_
#define _CNE_SPINLOCK_H_

#include <unistd.h>

#include <cne_cpuflags.h>
#include <cne_cycles.h>
#include <cne_branch_prediction.h>
#include <cne_common.h>
#include <cne_gettid.h>
#include <cne_system.h>
#include <cne_pause.h>
#include <cne_rtm.h>

#ifdef __cplusplus
extern "C" {
#endif

#define CNE_RTM_MAX_RETRIES  (20)
#define CNE_XABORT_LOCK_BUSY (0xff)

/**
 * The cne_spinlock_t type.
 */
typedef struct {
    volatile int locked; /**< lock status: 0 = unlocked, 1 = locked */
} cne_spinlock_t;

/**
 * A static spinlock initializer.
 */
#define CNE_SPINLOCK_INITIALIZER \
    {                            \
        0                        \
    }

/**
 * Initialize the spinlock to an unlocked state.
 */
static inline void
cne_spinlock_init(cne_spinlock_t *sl)
{
    sl->locked = 0;
}

#ifdef CNE_FORCE_INTRINSICS
/**
 * Take the spinlock, spinning until it is acquired.
 */
static inline void cne_spinlock_lock(cne_spinlock_t *sl);

static inline void
cne_spinlock_lock(cne_spinlock_t *sl)
{
    int exp = 0;

    while (
        !__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
        /* Wait for the lock to be released before retrying the CAS */
        while (__atomic_load_n(&sl->locked, __ATOMIC_RELAXED))
            cne_pause();
        exp = 0;
    }
}

/**
 * Release the spinlock.
 */
static inline void cne_spinlock_unlock(cne_spinlock_t *sl);

static inline void
cne_spinlock_unlock(cne_spinlock_t *sl)
{
    __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
}

/**
 * Try to take the lock once; return 1 on success, 0 if it is already held.
 */
static inline int cne_spinlock_trylock(cne_spinlock_t *sl);

static inline int
cne_spinlock_trylock(cne_spinlock_t *sl)
{
    int exp = 0;
    return __atomic_compare_exchange_n(&sl->locked, &exp, 1, 0, /* disallow spurious failure */
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

#else /* CNE_FORCE_INTRINSICS not defined */

static inline void
cne_spinlock_lock(cne_spinlock_t *sl)
{
    int lock_val = 1;
    /* Atomically exchange 1 into the lock word; while the lock reads
     * non-zero, spin with pause, then retry the exchange. */
    asm volatile("1:\n"
                 "xchg %[locked], %[lv]\n"
                 "test %[lv], %[lv]\n"
                 "jz 3f\n"
                 "2:\n"
                 "pause\n"
                 "cmpl $0, %[locked]\n"
                 "jnz 2b\n"
                 "jmp 1b\n"
                 "3:\n"
                 : [locked] "=m"(sl->locked), [lv] "=q"(lock_val)
                 : "[lv]"(lock_val)
                 : "memory");
}

static inline void
cne_spinlock_unlock(cne_spinlock_t *sl)
{
    int unlock_val = 0;
    asm volatile("xchg %[locked], %[ulv]\n"
                 : [locked] "=m"(sl->locked), [ulv] "=q"(unlock_val)
                 : "[ulv]"(unlock_val)
                 : "memory");
}

static inline int
cne_spinlock_trylock(cne_spinlock_t *sl)
{
    int lockval = 1;

    asm volatile("xchg %[locked], %[lockval]"
                 : [locked] "=m"(sl->locked), [lockval] "=q"(lockval)
                 : "[lockval]"(lockval)
                 : "memory");

    /* lockval receives the previous value: 0 means we took the lock */
    return lockval == 0;
}
#endif

/**
 * Return the current state of the lock without changing it: non-zero when
 * the lock is held.
 */
static inline int
cne_spinlock_is_locked(cne_spinlock_t *sl)
{
    return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);
}
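
/*
 * Usage sketch (illustrative only, not part of this header): guard a
 * critical section with the basic API. `counter_lock` and `shared_counter`
 * are hypothetical names used only for this example.
 *
 *    static cne_spinlock_t counter_lock = CNE_SPINLOCK_INITIALIZER;
 *    static int shared_counter;
 *
 *    static void
 *    increment_counter(void)
 *    {
 *        cne_spinlock_lock(&counter_lock);
 *        shared_counter++; // protected by counter_lock
 *        cne_spinlock_unlock(&counter_lock);
 *    }
 *
 *    static int
 *    try_increment_counter(void)
 *    {
 *        if (!cne_spinlock_trylock(&counter_lock))
 *            return 0; // lock busy; caller may retry later
 *        shared_counter++;
 *        cne_spinlock_unlock(&counter_lock);
 *        return 1;
 *    }
 */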

/**
 * The cne_spinlock_recursive_t type.
 */
typedef struct {
    cne_spinlock_t sl;  /**< the actual spinlock */
    volatile int user;  /**< thread id of the lock owner, -1 for unused */
    volatile int count; /**< nesting count for the owning thread */
} cne_spinlock_recursive_t;

/**
 * A static recursive spinlock initializer.
 */
#define CNE_SPINLOCK_RECURSIVE_INITIALIZER \
    {                                      \
        CNE_SPINLOCK_INITIALIZER, -1, 0    \
    }

/**
 * Initialize the recursive spinlock to an unlocked state.
 */
static inline void
cne_spinlock_recursive_init(cne_spinlock_recursive_t *slr)
{
    cne_spinlock_init(&slr->sl);
    slr->user = -1;
    slr->count = 0;
}

/**
 * Take the recursive spinlock; the owning thread may take it again.
 */
static inline void
cne_spinlock_recursive_lock(cne_spinlock_recursive_t *slr)
{
    int id = gettid();

    if (slr->user != id) {
        cne_spinlock_lock(&slr->sl);
        slr->user = id;
    }
    slr->count++;
}

/**
 * Release the recursive spinlock, dropping it only when the nesting count
 * reaches zero.
 */
static inline void
cne_spinlock_recursive_unlock(cne_spinlock_recursive_t *slr)
{
    if (--(slr->count) == 0) {
        slr->user = -1;
        cne_spinlock_unlock(&slr->sl);
    }
}

/**
 * Try to take the recursive lock once; return 1 on success, 0 if the lock
 * is held by another thread.
 */
static inline int
cne_spinlock_recursive_trylock(cne_spinlock_recursive_t *slr)
{
    int id = gettid();

    if (slr->user != id) {
        if (cne_spinlock_trylock(&slr->sl) == 0)
            return 0;
        slr->user = id;
    }
    slr->count++;
    return 1;
}
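
/*
 * Usage sketch (illustrative only): the recursive lock may be re-taken by
 * the thread that already holds it, so a helper that locks internally can
 * be called from a section that is already locked. The names below are
 * hypothetical.
 *
 *    static cne_spinlock_recursive_t table_lock = CNE_SPINLOCK_RECURSIVE_INITIALIZER;
 *
 *    static void
 *    audit_entry(void)
 *    {
 *        cne_spinlock_recursive_lock(&table_lock);   // same thread: count -> 2
 *        // ... read the entry ...
 *        cne_spinlock_recursive_unlock(&table_lock); // count -> 1
 *    }
 *
 *    static void
 *    update_entry(void)
 *    {
 *        cne_spinlock_recursive_lock(&table_lock);   // count -> 1
 *        audit_entry();                              // nested locking is safe
 *        cne_spinlock_recursive_unlock(&table_lock); // count -> 0, released
 *    }
 */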

/*
 * Try to execute the critical section as an RTM transaction: returns 1 when
 * a transaction has been started, 0 when the caller must take the lock.
 */
static inline int
cne_try_tm(volatile int *lock)
{
    int i, retries;

    if (!cne_cpu_rtm_is_supported())
        return 0;

    retries = CNE_RTM_MAX_RETRIES;

    while (likely(retries--)) {

        unsigned int status = cne_xbegin();

        if (likely(CNE_XBEGIN_STARTED == status)) {
            /* Abort if the lock is already held; otherwise run the
             * critical section transactionally. */
            if (unlikely(*lock))
                cne_xabort(CNE_XABORT_LOCK_BUSY);
            else
                return 1;
        }
        while (*lock)
            cne_pause();

        if ((status & CNE_XABORT_CONFLICT) ||
            ((status & CNE_XABORT_EXPLICIT) && (CNE_XABORT_CODE(status) == CNE_XABORT_LOCK_BUSY))) {
            /* Add a small delay before retrying, basing the delay on the
             * number of attempts so far to give back-off behaviour. The
             * pause count is randomized with the low bits of the TSC.
             */
            int try_count   = CNE_RTM_MAX_RETRIES - retries;
            int pause_count = (cne_rdtsc() & 0x7) | 1;
            pause_count <<= try_count;
            for (i = 0; i < pause_count; i++)
                cne_pause();
            continue;
        }

        if ((status & CNE_XABORT_RETRY) == 0) /* do not retry */
            break;
    }
    return 0;
}

308 
309 static inline void
310 cne_spinlock_lock_tm(cne_spinlock_t *sl)
311 {
312  if (likely(cne_try_tm(&sl->locked)))
313  return;
314 
315  cne_spinlock_lock(sl); /* fall-back */
316 }
317 
318 static inline int
319 cne_spinlock_trylock_tm(cne_spinlock_t *sl)
320 {
321  if (likely(cne_try_tm(&sl->locked)))
322  return 1;
323 
324  return cne_spinlock_trylock(sl);
325 }
326 
327 static inline void
328 cne_spinlock_unlock_tm(cne_spinlock_t *sl)
329 {
330  if (unlikely(sl->locked))
331  cne_spinlock_unlock(sl);
332  else
333  cne_xend();
334 }
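
/*
 * Usage sketch (illustrative only): the *_tm variants first try to elide
 * the lock inside an RTM transaction and fall back to the plain spinlock
 * when the transaction aborts or RTM is unsupported, so lock and unlock
 * must be paired through the *_tm entry points. `stats_lock` is a
 * hypothetical name.
 *
 *    static cne_spinlock_t stats_lock = CNE_SPINLOCK_INITIALIZER;
 *
 *    cne_spinlock_lock_tm(&stats_lock);   // transaction, or real lock on fall-back
 *    // ... critical section ...
 *    cne_spinlock_unlock_tm(&stats_lock); // cne_xend(), or real unlock
 */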

/**
 * Take the recursive spinlock with lock elision, falling back to the
 * regular recursive lock.
 */
static inline void
cne_spinlock_recursive_lock_tm(cne_spinlock_recursive_t *slr)
{
    if (likely(cne_try_tm(&slr->sl.locked)))
        return;

    cne_spinlock_recursive_lock(slr); /* fall-back */
}

/**
 * Release a recursive spinlock taken via cne_spinlock_recursive_lock_tm().
 */
static inline void
cne_spinlock_recursive_unlock_tm(cne_spinlock_recursive_t *slr)
{
    if (unlikely(slr->sl.locked))
        cne_spinlock_recursive_unlock(slr);
    else
        cne_xend();
}

353 
354 static inline int
355 cne_spinlock_recursive_trylock_tm(cne_spinlock_recursive_t *slr)
356 {
357  if (likely(cne_try_tm(&slr->sl.locked)))
358  return 1;
359 
360  return cne_spinlock_recursive_trylock(slr);
361 }
362 
363 #ifdef __cplusplus
364 }
365 #endif
366 
367 #endif /* _CNE_SPINLOCK_H_ */