// Copyright 2021 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_
#define ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <type_traits>

#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/invoke.h"
#include "absl/base/optimization.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {

class CordzInfo;

// Default feature enable states for cord btrees, ring buffers, and shallow
// subcords.
enum CordFeatureDefaults {
  kCordEnableBtreeDefault = true,
  kCordEnableRingBufferDefault = false,
  kCordShallowSubcordsDefault = false
};

extern std::atomic<bool> cord_btree_enabled;
extern std::atomic<bool> cord_ring_buffer_enabled;
extern std::atomic<bool> shallow_subcords_enabled;

// `cord_btree_exhaustive_validation` can be set to force exhaustive validation
// in debug assertions and in code that calls `IsValid()` explicitly. By
// default, assertions should be relatively cheap; exhaustive AssertValid() can
// easily lead to O(n^2) complexity, as recursive / full tree validation is
// O(n).
extern std::atomic<bool> cord_btree_exhaustive_validation;

inline void enable_cord_btree(bool enable) {
  cord_btree_enabled.store(enable, std::memory_order_relaxed);
}

inline void enable_cord_ring_buffer(bool enable) {
  cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
}

inline void enable_shallow_subcords(bool enable) {
  shallow_subcords_enabled.store(enable, std::memory_order_relaxed);
}

enum Constants {
  // The inlined size to use with absl::InlinedVector.
  //
  // Note: The InlinedVectors in this file (and in cord.h) do not need to use
  // the same value for their inlined size. The fact that they do is
  // historical. It may be desirable for each to use a different inlined size
  // optimized for that InlinedVector's usage.
  //
  // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
  // the inlined vector size (47 exists for backward compatibility).
  kInlinedVectorSize = 47,

  // Prefer copying blocks of at most this size; otherwise, reference count.
  kMaxBytesToCopy = 511
};

// Compact class for tracking the reference count and state flags for CordRep
// instances. Data is stored in an atomic int32_t for compactness and speed.
class RefcountAndFlags {
 public:
  constexpr RefcountAndFlags() : count_{kRefIncrement} {}
  struct Immortal {};
  explicit constexpr RefcountAndFlags(Immortal) : count_(kImmortalFlag) {}
  struct WithCrc {};
  explicit constexpr RefcountAndFlags(WithCrc)
      : count_(kCrcFlag | kRefIncrement) {}

  // Increments the reference count. Imposes no memory ordering.
  inline void Increment() {
    count_.fetch_add(kRefIncrement, std::memory_order_relaxed);
  }

  // Asserts that the current refcount is greater than 0. If the refcount is
  // greater than 1, decrements the reference count.
  //
  // Returns false if there are no references outstanding; true otherwise.
  // Inserts barriers to ensure that state written before this method returns
  // false will be visible to a thread that just observed this method returning
  // false. Always returns true when the immortal bit is set.
  inline bool Decrement() {
    int32_t refcount = count_.load(std::memory_order_acquire) & kRefcountMask;
    assert(refcount > 0 || refcount & kImmortalFlag);
    return refcount != kRefIncrement &&
           (count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
            kRefcountMask) != kRefIncrement;
  }

  // Same as Decrement(), but expects the refcount to be greater than 1.
  inline bool DecrementExpectHighRefcount() {
    int32_t refcount =
        count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
        kRefcountMask;
    assert(refcount > 0 || refcount & kImmortalFlag);
    return refcount != kRefIncrement;
  }

  // Returns the current reference count using acquire semantics.
  inline int32_t Get() const {
    return count_.load(std::memory_order_acquire) >> kNumFlags;
  }

  // Returns true if the referenced object carries a CRC value.
  bool HasCrc() const {
    return (count_.load(std::memory_order_relaxed) & kCrcFlag) != 0;
  }

  // Returns true iff the atomic integer is 1 and this node does not store
  // a CRC. When both these conditions are met, the current thread owns
  // the reference and no other thread shares it, so its contents may be
  // safely mutated.
  //
  // If the referenced item is shared, carries a CRC, or is immortal,
  // it should not be modified in-place, and this function returns false.
  //
  // This call performs the memory barrier needed for the owning thread
  // to act on the object, so that if it returns true, it may safely
  // assume exclusive access to the object.
  inline bool IsMutable() {
    return (count_.load(std::memory_order_acquire)) == kRefIncrement;
  }

  // Returns whether the atomic integer is 1. Similar to IsMutable(),
  // but does not check for a stored CRC. (An unshared node with a CRC is not
  // mutable, because changing its data would invalidate the CRC.)
  //
  // When this returns true, there are no other references, and data sinks
  // may safely adopt the children of the CordRep.
  inline bool IsOne() {
    return (count_.load(std::memory_order_acquire) & kRefcountMask) ==
           kRefIncrement;
  }

  bool IsImmortal() const {
    return (count_.load(std::memory_order_relaxed) & kImmortalFlag) != 0;
  }

 private:
  // We reserve the bottom bits for flags.
  // kImmortalFlag indicates that this entity should never be collected; it is
  // used for the StringConstant constructor to avoid collecting immutable
  // constant cords.
  enum {
    kNumFlags = 2,
    kImmortalFlag = 0x1,
    kCrcFlag = 0x2,
    kRefIncrement = (1 << kNumFlags),

    // Bitmask to use when checking refcount by equality. This masks out
    // all flags except kImmortalFlag, which is part of the refcount for
    // purposes of equality. (A refcount of 0 or 1 does not count as 0 or 1
    // if the immortal bit is set.)
    kRefcountMask = ~kCrcFlag,
  };

  std::atomic<int32_t> count_;
};
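
// Illustrative sketch (not part of the original header): with kNumFlags == 2,
// the low two bits of `count_` hold the flags and the logical refcount is
// stored shifted left by two. Assuming those constants:
//
//   RefcountAndFlags r;  // count_ == 0b0100: refcount 1, no flags
//   r.Increment();       // count_ == 0b1000: refcount 2
//   r.Decrement();       // returns true; count_ == 0b0100 again
//   r.Decrement();       // returns false: no other references remain, the
//                        // caller may destroy the object (the short-circuit
//                        // path leaves count_ unchanged)
//
// A CRC-carrying node starts at count_ == 0b0110, so IsMutable() is false
// even when the refcount is 1.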
// The overhead of a vtable is too much for Cord, so we roll our own subclasses
// using only a single byte to differentiate classes from each other - the
// "tag" byte. Define the subclasses first so we can provide downcasting helper
// functions in the base class.
struct CordRepConcat;
struct CordRepExternal;
struct CordRepFlat;
struct CordRepSubstring;
class CordRepRing;
class CordRepBtree;

// Various representations that we allow.
enum CordRepKind {
  CONCAT = 0,
  SUBSTRING = 1,
  BTREE = 2,
  RING = 3,
  EXTERNAL = 4,

  // We have different tags for different sized flat arrays, starting with
  // FLAT and limited to MAX_FLAT_TAG. The 225 value is based on the current
  // 'size to tag' encoding of 8 / 32 bytes. If a new tag is needed in the
  // future, then 'FLAT' and 'MAX_FLAT_TAG' should be adjusted, as well as the
  // Tag <---> Size logic, so that FLAT still represents the minimum flat
  // allocation size. (32 bytes as of now).
  FLAT = 5,
  MAX_FLAT_TAG = 225
};

// There are various locations where we want to check if some rep is a 'plain'
// data edge, i.e. an external or flat rep. By having FLAT == EXTERNAL + 1, we
// can perform this check in a single branch as 'tag >= EXTERNAL'.
// Likewise, we have some locations where we check for 'ring or external/flat',
// so we likewise keep RING adjacent to EXTERNAL.
// Note that we can leave this optimization to the compiler: it will do the
// right thing when it sees a condition like `tag == EXTERNAL || tag >= FLAT`.
static_assert(RING == BTREE + 1, "BTREE and RING not consecutive");
static_assert(EXTERNAL == RING + 1, "RING and EXTERNAL not consecutive");
static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT not consecutive");
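
// Illustrative sketch (not part of the original header): the tag ordering
// above lets a 'plain data edge' test compile down to a single comparison.
// The helper name below is hypothetical, for illustration only:
//
//   inline bool IsDataEdge(const CordRep* rep) {
//     return rep->tag >= EXTERNAL;  // true for EXTERNAL and all FLAT tags
//   }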
struct CordRep {
  CordRep() = default;
  constexpr CordRep(RefcountAndFlags::Immortal immortal, size_t l)
      : length(l), refcount(immortal), tag(EXTERNAL), storage{} {}

  // The following three fields have to fit within 32 bytes, since that is the
  // smallest supported flat node size.
  size_t length;
  RefcountAndFlags refcount;
  // If tag < FLAT, it represents CordRepKind and indicates the type of node.
  // Otherwise, the node type is CordRepFlat and the tag is the encoded size.
  uint8_t tag;

  // `storage` serves two main purposes:
  // - the starting point for CordRepFlat::Data() [flexible-array-member]
  // - 3 bytes of additional storage for use by derived classes.
  // The latter is used by CordRepConcat and CordRepBtree. CordRepConcat stores
  // a 'depth' value in storage[0], and the (future) CordRepBtree class stores
  // `height`, `begin` and `end` in the 3 entries. Otherwise we would need to
  // allocate room for these in the derived class, as not all compilers reuse
  // padding space from the base class (clang and gcc do, MSVC does not, etc).
  uint8_t storage[3];

  // Returns true if this instance's tag matches the requested type.
  constexpr bool IsRing() const { return tag == RING; }
  constexpr bool IsConcat() const { return tag == CONCAT; }
  constexpr bool IsSubstring() const { return tag == SUBSTRING; }
  constexpr bool IsExternal() const { return tag == EXTERNAL; }
  constexpr bool IsFlat() const { return tag >= FLAT; }
  constexpr bool IsBtree() const { return tag == BTREE; }

  inline CordRepRing* ring();
  inline const CordRepRing* ring() const;
  inline CordRepConcat* concat();
  inline const CordRepConcat* concat() const;
  inline CordRepSubstring* substring();
  inline const CordRepSubstring* substring() const;
  inline CordRepExternal* external();
  inline const CordRepExternal* external() const;
  inline CordRepFlat* flat();
  inline const CordRepFlat* flat() const;
  inline CordRepBtree* btree();
  inline const CordRepBtree* btree() const;

  // --------------------------------------------------------------------
  // Memory management

  // Destroys the provided `rep`.
  static void Destroy(CordRep* rep);

  // Increments the reference count of `rep`.
  // Requires `rep` to be a non-null pointer value.
  static inline CordRep* Ref(CordRep* rep);

  // Decrements the reference count of `rep`. Destroys rep if count reaches
  // zero. Requires `rep` to be a non-null pointer value.
  static inline void Unref(CordRep* rep);
};
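
// Illustrative sketch (not part of the original header): on a common 64-bit
// target the fixed fields lay out as length (8 bytes), refcount (4), tag (1)
// and storage (3), i.e. 16 bytes total, comfortably under the 32-byte minimum
// flat allocation size:
//
//   static_assert(sizeof(CordRep) <= 32, "must fit the smallest flat node");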
struct CordRepConcat : public CordRep {
  CordRep* left;
  CordRep* right;

  uint8_t depth() const { return storage[0]; }
  void set_depth(uint8_t depth) { storage[0] = depth; }
};

struct CordRepSubstring : public CordRep {
  size_t start;  // Starting offset of substring in child
  CordRep* child;
};

// Type for function pointer that will invoke the releaser function and also
// delete the `CordRepExternalImpl` corresponding to the passed in
// `CordRepExternal`.
using ExternalReleaserInvoker = void (*)(CordRepExternal*);

// External CordReps are allocated together with a type erased releaser. The
// releaser is stored in the memory directly following the CordRepExternal.
struct CordRepExternal : public CordRep {
  CordRepExternal() = default;
  explicit constexpr CordRepExternal(absl::string_view str)
      : CordRep(RefcountAndFlags::Immortal{}, str.size()),
        base(str.data()),
        releaser_invoker(nullptr) {}

  const char* base;
  // Pointer to function that knows how to call and destroy the releaser.
  ExternalReleaserInvoker releaser_invoker;

  // Deletes (releases) the external rep.
  // Requires rep != nullptr and rep->IsExternal()
  static void Delete(CordRep* rep);
};

struct Rank1 {};
struct Rank0 : Rank1 {};

template <typename Releaser, typename = ::absl::base_internal::invoke_result_t<
                                 Releaser, absl::string_view>>
void InvokeReleaser(Rank0, Releaser&& releaser, absl::string_view data) {
  ::absl::base_internal::invoke(std::forward<Releaser>(releaser), data);
}

template <typename Releaser,
          typename = ::absl::base_internal::invoke_result_t<Releaser>>
void InvokeReleaser(Rank1, Releaser&& releaser, absl::string_view) {
  ::absl::base_internal::invoke(std::forward<Releaser>(releaser));
}
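
// Illustrative sketch (not part of the original header): Rank0 derives from
// Rank1, so a call spelled InvokeReleaser(Rank0{}, ...) prefers the Rank0
// overload as the exact match. If the releaser is not invocable with a
// string_view, SFINAE removes that overload and overload resolution falls
// back to the Rank1 (zero-argument) version via the derived-to-base
// conversion:
//
//   auto r1 = [](absl::string_view) {};  // selects the Rank0 overload
//   auto r2 = [] {};                     // selects the Rank1 overload
//   InvokeReleaser(Rank0{}, r1, "data");
//   InvokeReleaser(Rank0{}, r2, "data");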
// We use CompressedTuple so that we can benefit from the empty base class
// optimization (EBCO).
template <typename Releaser>
struct CordRepExternalImpl
    : public CordRepExternal,
      public ::absl::container_internal::CompressedTuple<Releaser> {
  // The extra int arg is so that we can avoid interfering with copy/move
  // constructors while still benefitting from perfect forwarding.
  template <typename T>
  CordRepExternalImpl(T&& releaser, int)
      : CordRepExternalImpl::CompressedTuple(std::forward<T>(releaser)) {
    this->releaser_invoker = &Release;
  }

  ~CordRepExternalImpl() {
    InvokeReleaser(Rank0{}, std::move(this->template get<0>()),
                   absl::string_view(base, length));
  }

  static void Release(CordRepExternal* rep) {
    delete static_cast<CordRepExternalImpl*>(rep);
  }
};
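
// Illustrative sketch (not part of the original header): constructing an
// external rep by hand. `MyReleaser` is a hypothetical callable named here
// only for illustration; the public entry point, absl::MakeCordFromExternal,
// performs equivalent steps:
//
//   absl::string_view data = ...;  // memory owned by the caller
//   auto* rep = new CordRepExternalImpl<MyReleaser>(MyReleaser{}, 0);
//   rep->length = data.size();
//   rep->tag = EXTERNAL;
//   rep->base = data.data();
//   // When the last reference is dropped, CordRepExternal::Delete() calls
//   // releaser_invoker, which deletes the impl and thereby runs MyReleaser.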
inline void CordRepExternal::Delete(CordRep* rep) {
  assert(rep != nullptr && rep->IsExternal());
  auto* rep_external = static_cast<CordRepExternal*>(rep);
  assert(rep_external->releaser_invoker != nullptr);
  rep_external->releaser_invoker(rep_external);
}

template <typename Str>
struct ConstInitExternalStorage {
  ABSL_CONST_INIT static CordRepExternal value;
};

template <typename Str>
CordRepExternal ConstInitExternalStorage<Str>::value(Str::value);

enum {
  kMaxInline = 15,
};

constexpr char GetOrNull(absl::string_view data, size_t pos) {
  return pos < data.size() ? data[pos] : '\0';
}

// We store cordz_info as a 64-bit pointer value in big-endian format. This
// guarantees that the least-significant byte of cordz_info matches the last
// byte of the inline data representation in as_chars_, which holds the inlined
// size or the 'is_tree' bit.
using cordz_info_t = int64_t;

// Assert that the `cordz_info` pointer value perfectly overlaps the last half
// of `as_chars_` and can hold a pointer value.
static_assert(sizeof(cordz_info_t) * 2 == kMaxInline + 1, "");
static_assert(sizeof(cordz_info_t) >= sizeof(intptr_t), "");

// BigEndianByte() creates a big-endian representation of `value`, i.e., a
// big-endian value where the last byte in the host's representation holds
// `value`, with all other bytes being 0.
static constexpr cordz_info_t BigEndianByte(unsigned char value) {
#if defined(ABSL_IS_BIG_ENDIAN)
  return value;
#else
  return static_cast<cordz_info_t>(value) << ((sizeof(cordz_info_t) - 1) * 8);
#endif
}
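
// Illustrative sketch (not part of the original header): on a little-endian
// host, BigEndianByte(1) shifts the byte into the most-significant position.
// The resulting value stores the 0x01 byte last in memory, exactly where the
// inline tag byte lives:
//
//   // Holds on little-endian targets, where cordz_info_t is 8 bytes:
//   static_assert(BigEndianByte(1) ==
//                 (static_cast<cordz_info_t>(1) << 56), "");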
class InlineData {
 public:
  // DefaultInitType forces the use of the default initialization constructor.
  enum DefaultInitType { kDefaultInit };

  // kNullCordzInfo holds the big-endian representation of intptr_t(1).
  // This is the 'null' / initial value of 'cordz_info'. The null value is
  // specifically big-endian 1 because, with 64-bit pointers, the last byte of
  // cordz_info overlaps with the last byte holding the tag.
  static constexpr cordz_info_t kNullCordzInfo = BigEndianByte(1);

  constexpr InlineData() : as_chars_{0} {}
  explicit InlineData(DefaultInitType) {}
  explicit constexpr InlineData(CordRep* rep) : as_tree_(rep) {}
  explicit constexpr InlineData(absl::string_view chars)
      : as_chars_{
            GetOrNull(chars, 0),  GetOrNull(chars, 1),
            GetOrNull(chars, 2),  GetOrNull(chars, 3),
            GetOrNull(chars, 4),  GetOrNull(chars, 5),
            GetOrNull(chars, 6),  GetOrNull(chars, 7),
            GetOrNull(chars, 8),  GetOrNull(chars, 9),
            GetOrNull(chars, 10), GetOrNull(chars, 11),
            GetOrNull(chars, 12), GetOrNull(chars, 13),
            GetOrNull(chars, 14), static_cast<char>((chars.size() << 1))} {}

  // Returns true if the current instance is empty.
  // The 'empty value' is an inlined data value of zero length.
  bool is_empty() const { return tag() == 0; }

  // Returns true if the current instance holds a tree value.
  bool is_tree() const { return (tag() & 1) != 0; }

  // Returns true if the current instance holds a cordz_info value.
  // Requires the current instance to hold a tree value.
  bool is_profiled() const {
    assert(is_tree());
    return as_tree_.cordz_info != kNullCordzInfo;
  }

  // Returns true if either of the provided instances holds a cordz_info
  // value. This method is more efficient than the equivalent
  // `data1.is_profiled() || data2.is_profiled()`. Requires both arguments to
  // hold a tree.
  static bool is_either_profiled(const InlineData& data1,
                                 const InlineData& data2) {
    assert(data1.is_tree() && data2.is_tree());
    return (data1.as_tree_.cordz_info | data2.as_tree_.cordz_info) !=
           kNullCordzInfo;
  }

  // Returns the cordz_info sampling instance for this instance, or nullptr
  // if the current instance is not sampled and does not have CordzInfo data.
  // Requires the current instance to hold a tree value.
  CordzInfo* cordz_info() const {
    assert(is_tree());
    intptr_t info =
        static_cast<intptr_t>(absl::big_endian::ToHost64(as_tree_.cordz_info));
    assert(info & 1);
    return reinterpret_cast<CordzInfo*>(info - 1);
  }

  // Sets the cordz_info sampling instance for this instance. Pass `nullptr`
  // to mark the instance as not sampled and without CordzInfo data.
  // Requires the current instance to hold a tree value.
  void set_cordz_info(CordzInfo* cordz_info) {
    assert(is_tree());
    intptr_t info = reinterpret_cast<intptr_t>(cordz_info) | 1;
    as_tree_.cordz_info = absl::big_endian::FromHost64(info);
  }

  // Resets the current cordz_info to null / empty.
  void clear_cordz_info() {
    assert(is_tree());
    as_tree_.cordz_info = kNullCordzInfo;
  }

  // Returns a read-only pointer to the character data inside this instance.
  // Requires the current instance to hold inline data.
  const char* as_chars() const {
    assert(!is_tree());
    return as_chars_;
  }

  // Returns a mutable pointer to the character data inside this instance.
  // Should be used for 'write only' operations setting an inlined value.
  // Applications can set the value of inlined data either before or after
  // setting the inlined size, i.e., both of the below are valid:
  //
  //   // Set inlined data and inline size
  //   memcpy(data_.as_chars(), data, size);
  //   data_.set_inline_size(size);
  //
  //   // Set inlined size and inline data
  //   data_.set_inline_size(size);
  //   memcpy(data_.as_chars(), data, size);
  //
  // It's an error to read from the returned pointer without a preceding write
  // if the current instance does not hold inline data, i.e.: is_tree() == true.
  char* as_chars() { return as_chars_; }

  // Returns the tree value of this instance.
  // Requires the current instance to hold a tree value.
  CordRep* as_tree() const {
    assert(is_tree());
    return as_tree_.rep;
  }

  // Initializes this instance to hold the tree value `rep`,
  // initializing the cordz_info to null, i.e.: 'not profiled'.
  void make_tree(CordRep* rep) {
    as_tree_.rep = rep;
    as_tree_.cordz_info = kNullCordzInfo;
  }

  // Sets the tree value of this instance to `rep`.
  // Requires the current instance to already hold a tree value.
  // Does not affect the value of cordz_info.
  void set_tree(CordRep* rep) {
    assert(is_tree());
    as_tree_.rep = rep;
  }

  // Returns the size of the inlined character data inside this instance.
  // Requires the current instance to hold inline data.
  size_t inline_size() const {
    assert(!is_tree());
    return tag() >> 1;
  }

  // Sets the size of the inlined character data inside this instance.
  // Requires `size` to be <= kMaxInline.
  // See the documentation on 'as_chars()' for more information and examples.
  void set_inline_size(size_t size) {
    ABSL_ASSERT(size <= kMaxInline);
    tag() = static_cast<char>(size << 1);
  }

 private:
  // See cordz_info_t for forced alignment and size of `cordz_info` details.
  struct AsTree {
    explicit constexpr AsTree(absl::cord_internal::CordRep* tree)
        : rep(tree), cordz_info(kNullCordzInfo) {}
    // This union uses up extra space so that whether rep is 32 or 64 bits,
    // cordz_info will still start at the eighth byte, and the last
    // byte of cordz_info will still be the last byte of InlineData.
    union {
      absl::cord_internal::CordRep* rep;
      cordz_info_t unused_aligner;
    };
    cordz_info_t cordz_info;
  };

  char& tag() { return reinterpret_cast<char*>(this)[kMaxInline]; }
  char tag() const { return reinterpret_cast<const char*>(this)[kMaxInline]; }

  // If the data has length <= kMaxInline, we store it in `as_chars_`, and
  // store the size, shifted left by one bit, in the last char of `as_chars_`.
  // Else we store it in a tree, store a pointer to that tree in
  // `as_tree_.rep`, and mark the tree state via the low bit of the last byte
  // (which overlaps the last byte of cordz_info).
  union {
    char as_chars_[kMaxInline + 1];
    AsTree as_tree_;
  };
};

static_assert(sizeof(InlineData) == kMaxInline + 1, "");
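
// Illustrative sketch (not part of the original header): the last byte of
// InlineData encodes both state and size. For an inline value, the low bit is
// 0 and the size occupies the upper bits; for a tree, the low bit is 1
// because kNullCordzInfo (big-endian 1) places a 0x01 byte there:
//
//   InlineData data(absl::string_view("abc"));
//   // tag() == 3 << 1 == 6: inline, size 3
//   assert(!data.is_tree() && data.inline_size() == 3);
//
//   CordRep* rep = ...;   // some tree rep
//   data.make_tree(rep);  // cordz_info = kNullCordzInfo
//   assert(data.is_tree());  // last byte is now 1: low bit set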
inline CordRepConcat* CordRep::concat() {
  assert(IsConcat());
  return static_cast<CordRepConcat*>(this);
}

inline const CordRepConcat* CordRep::concat() const {
  assert(IsConcat());
  return static_cast<const CordRepConcat*>(this);
}

inline CordRepSubstring* CordRep::substring() {
  assert(IsSubstring());
  return static_cast<CordRepSubstring*>(this);
}

inline const CordRepSubstring* CordRep::substring() const {
  assert(IsSubstring());
  return static_cast<const CordRepSubstring*>(this);
}

inline CordRepExternal* CordRep::external() {
  assert(IsExternal());
  return static_cast<CordRepExternal*>(this);
}

inline const CordRepExternal* CordRep::external() const {
  assert(IsExternal());
  return static_cast<const CordRepExternal*>(this);
}

inline CordRep* CordRep::Ref(CordRep* rep) {
  assert(rep != nullptr);
  rep->refcount.Increment();
  return rep;
}

inline void CordRep::Unref(CordRep* rep) {
  assert(rep != nullptr);
  // Expect the refcount to be greater than 1: in that case the atomic
  // decrement happens regardless, so skipping Decrement()'s extra load and
  // branch that check for ref == 1 typically wins.
  if (ABSL_PREDICT_FALSE(!rep->refcount.DecrementExpectHighRefcount())) {
    Destroy(rep);
  }
}
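
// Illustrative sketch (not part of the original header): typical ownership
// flow when a rep is shared between two cords:
//
//   CordRep* rep = ...;  // refcount 1, owned by cord A
//   CordRep::Ref(rep);   // refcount 2, now also referenced by cord B
//   CordRep::Unref(rep); // cord A releases; refcount back to 1
//   CordRep::Unref(rep); // last reference; Destroy(rep) runs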
}  // namespace cord_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_