libcopp  2.2.0
uint64_id_allocator.cpp
Go to the documentation of this file.
1 // Copyright 2023 owent
2 
3 #include <libcopp/utils/config/libcopp_build_features.h>
4 
7 
8 // clang-format off
9 #include <libcopp/utils/config/stl_include_prefix.h> // NOLINT(build/include_order)
10 // clang-format on
11 #if defined(THREAD_TLS_USE_PTHREAD) && THREAD_TLS_USE_PTHREAD
12 # include <pthread.h>
13 #endif
14 #include <ctime>
15 // clang-format off
16 #include <libcopp/utils/config/stl_include_suffix.h> // NOLINT(build/include_order)
17 // clang-format on
18 
19 LIBCOPP_COPP_NAMESPACE_BEGIN
20 namespace util {
21 namespace details {
22 
23 // TIMESTAMP:32|SEQUENCE:24 -> about 16M id per second
24 static uint64_t allocate_id_by_atomic() {
25  static LIBCOPP_COPP_NAMESPACE_ID::util::lock::atomic_int_type<uint64_t> seq_alloc(0);
26 
27  //
28  static constexpr const size_t seq_bits = 24;
29  static constexpr const uint64_t time_mask = (static_cast<uint64_t>(1) << 32) - 1;
30 
31  // always do not allocate 0 as a valid ID
32  uint64_t ret = 0;
33  while (0 == ret) {
34  uint64_t res = seq_alloc.load();
35  uint64_t time_part = res >> seq_bits;
36 
37  uint64_t next_ret = res + 1;
38  uint64_t next_time_part = next_ret >> seq_bits;
39  if (0 == time_part || time_part != next_time_part) {
40  uint64_t now_time = time_part;
41  while (time_part == now_time) {
42  now_time = (static_cast<uint64_t>(time(nullptr)) & time_mask) - 1577836800; // 2020-01-01 00:00:00+00:00 UTC
43  }
44 
45  // if failed, maybe another thread do it
46  if (seq_alloc.compare_exchange_strong(res, now_time << seq_bits,
49  ret = now_time << seq_bits;
50  }
51  } else {
52  if (seq_alloc.compare_exchange_weak(res, next_ret, LIBCOPP_COPP_NAMESPACE_ID::util::lock::memory_order_acq_rel,
54  ret = next_ret;
55  }
56  }
57  }
58 
59  return ret;
60 }
61 
// Per-thread id cache: a batch base obtained from the shared atomic allocator
// plus a locally consumed sequence, so threads rarely contend on the atomic.
// (The scraped source dropped the struct header line; restored here.)
struct uint64_id_allocator_tls_cache_t {
  uint64_t base;       // batch base id fetched from allocate_id_by_atomic(); 0 means "not yet fetched"
  uint64_t inner_seq;  // ids consumed from this batch so far (compared against 1 << 5 in allocate())
};
66 
67 #if defined(THREAD_TLS_USE_PTHREAD) && THREAD_TLS_USE_PTHREAD
68 static pthread_once_t gt_uint64_id_allocator_tls_once = PTHREAD_ONCE_INIT;
69 static pthread_key_t gt_uint64_id_allocator_tls_key;
70 
71 static void dtor_pthread_uint64_id_allocator_tls(void *p) {
73  if (nullptr != cache) {
74  delete cache;
75  }
76 }
77 
78 static void init_pthread_uint64_id_allocator_tls() {
79  (void)pthread_key_create(&gt_uint64_id_allocator_tls_key, dtor_pthread_uint64_id_allocator_tls);
80 }
81 
82 struct gt_uint64_id_allocator_tls_cache_main_thread_dtor_t {
83  gt_uint64_id_allocator_tls_cache_main_thread_dtor_t() {}
84 
85  ~gt_uint64_id_allocator_tls_cache_main_thread_dtor_t() {
86  void *cache_ptr = pthread_getspecific(gt_uint64_id_allocator_tls_key);
87  pthread_setspecific(gt_uint64_id_allocator_tls_key, nullptr);
88  dtor_pthread_uint64_id_allocator_tls(cache_ptr);
89  }
90 };
// Lazily instantiates the function-local static whose destructor frees this
// thread's TLS cache at static-destruction time (first caller "owns" it).
// NOTE(review): "get_log" in this name looks like a copy-paste artifact from a
// logging TLS helper; renaming would touch its caller, so it is flagged only.
static void init_pthread_get_log_tls_main_thread_dtor() {
  static gt_uint64_id_allocator_tls_cache_main_thread_dtor_t gt_uint64_id_allocator_tls_cache_main_thread_dtor;
  (void)gt_uint64_id_allocator_tls_cache_main_thread_dtor;  // silence unused-variable warnings
}
95 
96 static uint64_id_allocator_tls_cache_t *get_uint64_id_allocator_tls_cache() {
97  init_pthread_get_log_tls_main_thread_dtor();
98  (void)pthread_once(&gt_uint64_id_allocator_tls_once, init_pthread_uint64_id_allocator_tls);
99  uint64_id_allocator_tls_cache_t *ret =
100  reinterpret_cast<uint64_id_allocator_tls_cache_t *>(pthread_getspecific(gt_uint64_id_allocator_tls_key));
101  if (nullptr == ret) {
102  ret = new uint64_id_allocator_tls_cache_t();
103  ret->base = 0;
104  ret->inner_seq = 0;
105  pthread_setspecific(gt_uint64_id_allocator_tls_key, ret);
106  }
107  return ret;
108 }
109 
110 #else
112  static thread_local uint64_id_allocator_tls_cache_t ret = {0, 0};
113  return &ret;
114 }
115 #endif
116 } // namespace details
117 
118 LIBCOPP_COPP_API uint64_id_allocator::value_type uint64_id_allocator::allocate() LIBCOPP_MACRO_NOEXCEPT {
119  // details::allocate_id_by_atomic() takes 56 bits, we use 5 bits here
121  if (nullptr == tls_cache) {
122  return 0;
123  }
124 
125  static constexpr const uint64_t tls_cache_count = (static_cast<uint64_t>(1) << 5);
126 
127  while (0 == tls_cache->base || tls_cache->inner_seq >= tls_cache_count) {
128  tls_cache->base = details::allocate_id_by_atomic();
129  tls_cache->inner_seq = 0;
130  }
131 
132  return (tls_cache->base << 5) + (tls_cache->inner_seq++);
133 }
134 
// Deliberate no-op: ids are timestamp+sequence based and never recycled, so
// there is nothing to return to the allocator.
LIBCOPP_COPP_API void uint64_id_allocator::deallocate(value_type) LIBCOPP_MACRO_NOEXCEPT {}
136 } // namespace util
137 LIBCOPP_COPP_NAMESPACE_END
atomic wrapper for integers Licensed under the MIT licenses.
static void deallocate(value_type) LIBCOPP_MACRO_NOEXCEPT
static value_type allocate() LIBCOPP_MACRO_NOEXCEPT
static uint64_t allocate_id_by_atomic()
uint64_id_allocator_tls_cache_t * get_uint64_id_allocator_tls_cache()