1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/kernel.h>
4 #include <linux/irqflags.h>
5 #include <linux/string.h>
6 #include <linux/errno.h>
8 #include "printk_ringbuffer.h"
11 * DOC: printk_ringbuffer overview
15 * The printk_ringbuffer is made up of 3 internal ringbuffers:
18 * A ring of descriptors. A descriptor contains all record meta data
19 * (sequence number, timestamp, loglevel, etc.) as well as internal state
20 * information about the record and logical positions specifying where in
21 * the other ringbuffers the text and dictionary strings are located.
24 * A ring of data blocks. A data block consists of an unsigned long
25 * integer (ID) that maps to a desc_ring index followed by the text
26 * string of the record.
29 * A ring of data blocks. A data block consists of an unsigned long
30 * integer (ID) that maps to a desc_ring index followed by the dictionary
31 * string of the record.
33 * The internal state information of a descriptor is the key element to allow
34 * readers and writers to locklessly synchronize access to the data.
41 * The descriptor ring is an array of descriptors. A descriptor contains all
42 * the meta data of a printk record as well as blk_lpos structs pointing to
43 * associated text and dictionary data blocks (see "Data Rings" below). Each
44 * descriptor is assigned an ID that maps directly to index values of the
45 * descriptor array and has a state. The ID and the state are bitwise combined
46 * into a single descriptor field named @state_var, allowing ID and state to
47 * be synchronously and atomically updated.
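 *
 * As an illustration of that packing (a sketch only; the authoritative
 * masks and the DESC_ID() helper are defined in printk_ringbuffer.h), the
 * state flags can live in the top bits of @state_var while the ID occupies
 * the remaining low bits::
 *
 *	// hypothetical masks mirroring the layout described above
 *	#define SV_BITS		(sizeof(unsigned long) * 8)
 *	#define SV_COMMITTED	(1UL << (SV_BITS - 1))
 *	#define SV_REUSE	(1UL << (SV_BITS - 2))
 *	#define SV_ID_MASK	(~(SV_COMMITTED | SV_REUSE))
 *
 *	unsigned long sv = id | SV_COMMITTED;	// a committed record
 *	unsigned long got_id = sv & SV_ID_MASK;	// recover the ID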
49 * Descriptors have three states:
52 * A writer is modifying the record.
55 * The record and all its data are complete and available for reading.
 * The record exists, but its text and/or dictionary data may no longer
 * be available.
61 * Querying the @state_var of a record requires providing the ID of the
62 * descriptor to query. This can yield a possible fourth (pseudo) state:
65 * The descriptor being queried has an unexpected ID.
67 * The descriptor ring has a @tail_id that contains the ID of the oldest
68 * descriptor and @head_id that contains the ID of the newest descriptor.
70 * When a new descriptor should be created (and the ring is full), the tail
71 * descriptor is invalidated by first transitioning to the reusable state and
72 * then invalidating all tail data blocks up to and including the data blocks
73 * associated with the tail descriptor (for text and dictionary rings). Then
74 * @tail_id is advanced, followed by advancing @head_id. And finally the
 * @state_var of the new descriptor is initialized to the new ID and reserved
 * state.
78 * The @tail_id can only be advanced if the new @tail_id would be in the
 * committed or reusable queried state. This ensures that a valid sequence
 * number for the tail is always available.
84 * The two data rings (text and dictionary) function identically. They exist
85 * separately so that their buffer sizes can be individually set and they do
86 * not affect one another.
88 * Data rings are byte arrays composed of data blocks. Data blocks are
89 * referenced by blk_lpos structs that point to the logical position of the
90 * beginning of a data block and the beginning of the next adjacent data
 * block. Logical positions are mapped directly to index values of the byte
 * array ringbuffer.
94 * Each data block consists of an ID followed by the writer data. The ID is
95 * the identifier of a descriptor that is associated with the data block. A
 * given data block is considered valid if all of the following conditions
 * are met:
99 * 1) The descriptor associated with the data block is in the committed
102 * 2) The blk_lpos struct within the descriptor associated with the data
103 * block references back to the same data block.
105 * 3) The data block is within the head/tail logical position range.
107 * If the writer data of a data block would extend beyond the end of the
108 * byte array, only the ID of the data block is stored at the logical
109 * position and the full data block (ID and writer data) is stored at the
110 * beginning of the byte array. The referencing blk_lpos will point to the
111 * ID before the wrap and the next data block will be at the logical
 * position adjacent to the full data block after the wrap.
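 *
 * As a worked example (sizes chosen only for illustration): with a 1 KiB
 * data ring and an 8-byte ID, a 100-byte data block reserved at logical
 * position 1000 cannot fit before the end of the byte array. Only the ID
 * is stored at index 1000, the full data block is stored at index 0 of
 * the next wrap, and the block spans the logical positions 1000 to 1124
 * (1024 + 100)::
 *
 *	blk_lpos.begin = 1000;	// the ID before the wrap
 *	blk_lpos.next  = 1124;	// adjacent to the wrapped block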
114 * Data rings have a @tail_lpos that points to the beginning of the oldest
115 * data block and a @head_lpos that points to the logical position of the
116 * next (not yet existing) data block.
118 * When a new data block should be created (and the ring is full), tail data
119 * blocks will first be invalidated by putting their associated descriptors
120 * into the reusable state and then pushing the @tail_lpos forward beyond
121 * them. Then the @head_lpos is pushed forward and is associated with a new
122 * descriptor. If a data block is not valid, the @tail_lpos cannot be
123 * advanced beyond it.
127 * Here are some simple examples demonstrating writers and readers. For the
128 * examples a global ringbuffer (test_rb) is available (which is not the
129 * actual ringbuffer used by printk)::
131 * DEFINE_PRINTKRB(test_rb, 15, 5, 3);
133 * This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of
 * 1 MiB (2 ^ (15 + 5)) for text data and 256 KiB (2 ^ (15 + 3)) for
 * dictionary data.
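 *
 * Put differently, the last two parameters give the average data size per
 * record: 2 ^ 5 = 32 bytes of text and 2 ^ 3 = 8 bytes of dictionary data,
 * so 32768 * 32 bytes = 1 MiB and 32768 * 8 bytes = 256 KiB in total.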
137 * Sample writer code::
139 * const char *dictstr = "dictionary text";
140 * const char *textstr = "message text";
141 * struct prb_reserved_entry e;
142 * struct printk_record r;
144 * // specify how much to allocate
145 * prb_rec_init_wr(&r, strlen(textstr) + 1, strlen(dictstr) + 1);
147 * if (prb_reserve(&e, &test_rb, &r)) {
148 * snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
150 * // dictionary allocation may have failed
 * if (r.dict_buf)
 *	snprintf(r.dict_buf, r.dict_buf_size, "%s", dictstr);
 * r.info->ts_nsec = local_clock();
 *
 * prb_commit(&e);
 * }
159 * Sample reader code::
161 * struct printk_info info;
 * struct printk_record r;
 * char text_buf[32];
 * char dict_buf[32];
 * u64 seq;
167 * prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf),
168 * &dict_buf[0], sizeof(dict_buf));
170 * prb_for_each_record(0, &test_rb, &seq, &r) {
171 * if (info.seq != seq)
172 * pr_warn("lost %llu records\n", info.seq - seq);
174 * if (info.text_len > r.text_buf_size) {
175 * pr_warn("record %llu text truncated\n", info.seq);
176 * text_buf[r.text_buf_size - 1] = 0;
179 * if (info.dict_len > r.dict_buf_size) {
180 * pr_warn("record %llu dict truncated\n", info.seq);
181 * dict_buf[r.dict_buf_size - 1] = 0;
184 * pr_info("%llu: %llu: %s;%s\n", info.seq, info.ts_nsec,
185 * &text_buf[0], info.dict_len ? &dict_buf[0] : "");
188 * Note that additional less convenient reader functions are available to
189 * allow complex record access.
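 *
 * For example, prb_read_valid_info() can be used to examine only the meta
 * data and line count of a record without copying any of its data (a
 * minimal sketch; error handling omitted)::
 *
 *	struct printk_info info;
 *	unsigned int line_count;
 *
 *	if (prb_read_valid_info(&test_rb, seq, &info, &line_count)) {
 *		pr_info("record %llu has %u line(s)\n",
 *			info.seq, line_count);
 *	}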
193 * To help avoid ABA issues, descriptors are referenced by IDs (array index
194 * values combined with tagged bits counting array wraps) and data blocks are
195 * referenced by logical positions (array index values combined with tagged
196 * bits counting array wraps). However, on 32-bit systems the number of
197 * tagged bits is relatively small such that an ABA incident is (at least
198 * theoretically) possible. For example, if 4 million maximally sized (1KiB)
199 * printk messages were to occur in NMI context on a 32-bit system, the
200 * interrupted context would not be able to recognize that the 32-bit integer
201 * completely wrapped and thus represents a different data block than the one
202 * the interrupted context expects.
204 * To help combat this possibility, additional state checking is performed
205 * (such as using cmpxchg() even though set() would suffice). These extra
206 * checks are commented as such and will hopefully catch any ABA issue that
207 * a 32-bit system might experience.
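 *
 * The arithmetic behind that example: a maximally sized record occupies
 * about 2 ^ 10 bytes, so a 32-bit logical position wraps after roughly
 * 2 ^ 32 / 2 ^ 10 = 2 ^ 22 (about 4 million) such records.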
211 * Multiple memory barriers are used. To simplify proving correctness and
212 * generating litmus tests, lines of code related to memory barriers
213 * (loads, stores, and the associated memory barriers) are labeled::
215 * LMM(function:letter)
217 * Comments reference the labels using only the "function:letter" part.
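 *
 * For example, the load of the initial descriptor state in desc_read() is
 * written as::
 *
 *	state_val = atomic_long_read(state_var); // LMM(desc_read:A)
 *
 * and is referred to in comments simply as "desc_read:A".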
219 * The memory barrier pairs and their ordering are:
221 * desc_reserve:D / desc_reserve:B
222 * push descriptor tail (id), then push descriptor head (id)
224 * desc_reserve:D / data_push_tail:B
225 * push data tail (lpos), then set new descriptor reserved (state)
227 * desc_reserve:D / desc_push_tail:C
228 * push descriptor tail (id), then set new descriptor reserved (state)
230 * desc_reserve:D / prb_first_seq:C
231 * push descriptor tail (id), then set new descriptor reserved (state)
233 * desc_reserve:F / desc_read:D
234 * set new descriptor id and reserved (state), then allow writer changes
236 * data_alloc:A / desc_read:D
237 * set old descriptor reusable (state), then modify new data block area
239 * data_alloc:A / data_push_tail:B
240 * push data tail (lpos), then modify new data block area
242 * prb_commit:B / desc_read:B
243 * store writer changes, then set new descriptor committed (state)
245 * data_push_tail:D / data_push_tail:A
246 * set descriptor reusable (state), then push data tail (lpos)
248 * desc_push_tail:B / desc_reserve:D
249 * set descriptor reusable (state), then push descriptor tail (id)
252 #define DATA_SIZE(data_ring) _DATA_SIZE((data_ring)->size_bits)
253 #define DATA_SIZE_MASK(data_ring) (DATA_SIZE(data_ring) - 1)
255 #define DESCS_COUNT(desc_ring) _DESCS_COUNT((desc_ring)->count_bits)
256 #define DESCS_COUNT_MASK(desc_ring) (DESCS_COUNT(desc_ring) - 1)
258 /* Determine the data array index from a logical position. */
259 #define DATA_INDEX(data_ring, lpos) ((lpos) & DATA_SIZE_MASK(data_ring))
261 /* Determine the desc array index from an ID or sequence number. */
262 #define DESC_INDEX(desc_ring, n) ((n) & DESCS_COUNT_MASK(desc_ring))
264 /* Determine how many times the data array has wrapped. */
265 #define DATA_WRAPS(data_ring, lpos) ((lpos) >> (data_ring)->size_bits)
267 /* Determine if a logical position refers to a data-less block. */
268 #define LPOS_DATALESS(lpos) ((lpos) & 1UL)
270 /* Get the logical position at index 0 of the current wrap. */
271 #define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \
272 ((lpos) & ~DATA_SIZE_MASK(data_ring))
274 /* Get the ID for the same index of the previous wrap as the given ID. */
275 #define DESC_ID_PREV_WRAP(desc_ring, id) \
276 DESC_ID((id) - DESCS_COUNT(desc_ring))
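
/*
 * A worked example of the wrap arithmetic (values are illustrative only):
 * with count_bits = 15 there are 32768 descriptors, so an ID of 32770 maps
 * to descriptor array index 2 (32770 & 32767) and DESC_ID_PREV_WRAP()
 * yields ID 2, the ID that occupied the same array slot one wrap earlier.
 * Similarly, DATA_INDEX() keeps the low @size_bits bits of a logical
 * position and DATA_WRAPS() counts how many times the byte array has
 * wrapped.
 */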
279 * A data block: mapped directly to the beginning of the data block area
280 * specified as a logical position within the data ring.
282 * @id: the ID of the associated descriptor
283 * @data: the writer data
285 * Note that the size of a data block is only known by its associated
288 struct prb_data_block {
294 * Return the descriptor associated with @n. @n can be either a
295 * descriptor ID or a sequence number.
297 static struct prb_desc *to_desc(struct prb_desc_ring *desc_ring, u64 n)
299 return &desc_ring->descs[DESC_INDEX(desc_ring, n)];
302 static struct prb_data_block *to_block(struct prb_data_ring *data_ring,
303 unsigned long begin_lpos)
305 return (void *)&data_ring->data[DATA_INDEX(data_ring, begin_lpos)];
309 * Increase the data size to account for data block meta data plus any
310 * padding so that the adjacent data block is aligned on the ID size.
312 static unsigned int to_blk_size(unsigned int size)
314 struct prb_data_block *db = NULL;
	size += sizeof(db->id);
	size = ALIGN(size, sizeof(db->id));

	return size;
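
/*
 * For example, on a system with an 8-byte unsigned long, a requested size
 * of 13 bytes becomes 13 + 8 = 21 bytes once the ID is accounted for,
 * which ALIGN() pads to 24 bytes so that the next data block's ID remains
 * naturally aligned.
 */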
322 * Sanity checker for reserve size. The ringbuffer code assumes that a data
323 * block does not exceed the maximum possible size that could fit within the
324 * ringbuffer. This function provides that basic size check so that the
325 * assumption is safe.
327 static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
329 struct prb_data_block *db = NULL;
335 * Ensure the alignment padded size could possibly fit in the data
336 * array. The largest possible data block must still leave room for
337 * at least the ID of the next block.
339 size = to_blk_size(size);
340 if (size > DATA_SIZE(data_ring) - sizeof(db->id))
346 /* The possible responses of a descriptor state-query. */
348 desc_miss, /* ID mismatch */
349 desc_reserved, /* reserved, in use by writer */
350 desc_committed, /* committed, writer is done */
351 desc_reusable, /* free, not yet used by any writer */
354 /* Query the state of a descriptor. */
355 static enum desc_state get_desc_state(unsigned long id,
356 unsigned long state_val)
	if (id != DESC_ID(state_val))
		return desc_miss;
361 if (state_val & DESC_REUSE_MASK)
362 return desc_reusable;
364 if (state_val & DESC_COMMITTED_MASK)
365 return desc_committed;
367 return desc_reserved;
371 * Get a copy of a specified descriptor and return its queried state. If the
372 * descriptor is in an inconsistent state (miss or reserved), the caller can
373 * only expect the descriptor's @state_var field to be valid.
375 static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
376 unsigned long id, struct prb_desc *desc_out)
378 struct prb_desc *desc = to_desc(desc_ring, id);
379 atomic_long_t *state_var = &desc->state_var;
380 enum desc_state d_state;
381 unsigned long state_val;
383 /* Check the descriptor state. */
384 state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */
385 d_state = get_desc_state(id, state_val);
386 if (d_state == desc_miss || d_state == desc_reserved) {
388 * The descriptor is in an inconsistent state. Set at least
389 * @state_var so that the caller can see the details of
390 * the inconsistent state.
396 * Guarantee the state is loaded before copying the descriptor
397 * content. This avoids copying obsolete descriptor content that might
398 * not apply to the descriptor state. This pairs with prb_commit:B.
400 * Memory barrier involvement:
402 * If desc_read:A reads from prb_commit:B, then desc_read:C reads
407 * WMB from prb_commit:A to prb_commit:B
409 * RMB from desc_read:A to desc_read:C
411 smp_rmb(); /* LMM(desc_read:B) */
414 * Copy the descriptor data. The data is not valid until the
415 * state has been re-checked.
417 memcpy(desc_out, desc, sizeof(*desc_out)); /* LMM(desc_read:C) */
420 * 1. Guarantee the descriptor content is loaded before re-checking
421 * the state. This avoids reading an obsolete descriptor state
422 * that may not apply to the copied content. This pairs with
425 * Memory barrier involvement:
427 * If desc_read:C reads from desc_reserve:G, then desc_read:E
428 * reads from desc_reserve:F.
432 * WMB from desc_reserve:F to desc_reserve:G
434 * RMB from desc_read:C to desc_read:E
436 * 2. Guarantee the record data is loaded before re-checking the
437 * state. This avoids reading an obsolete descriptor state that may
438 * not apply to the copied data. This pairs with data_alloc:A.
440 * Memory barrier involvement:
442 * If copy_data:A reads from data_alloc:B, then desc_read:E
443 * reads from desc_make_reusable:A.
447 * MB from desc_make_reusable:A to data_alloc:B
449 * RMB from desc_read:C to desc_read:E
451 * Note: desc_make_reusable:A and data_alloc:B can be different
452 * CPUs. However, the data_alloc:B CPU (which performs the
453 * full memory barrier) must have previously seen
454 * desc_make_reusable:A.
456 smp_rmb(); /* LMM(desc_read:D) */
459 * The data has been copied. Return the current descriptor state,
460 * which may have changed since the load above.
462 state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */
463 d_state = get_desc_state(id, state_val);
465 atomic_long_set(&desc_out->state_var, state_val);
470 * Take a specified descriptor out of the committed state by attempting
471 * the transition from committed to reusable. Either this context or some
472 * other context will have been successful.
474 static void desc_make_reusable(struct prb_desc_ring *desc_ring,
477 unsigned long val_committed = id | DESC_COMMITTED_MASK;
478 unsigned long val_reusable = val_committed | DESC_REUSE_MASK;
479 struct prb_desc *desc = to_desc(desc_ring, id);
480 atomic_long_t *state_var = &desc->state_var;
482 atomic_long_cmpxchg_relaxed(state_var, val_committed,
483 val_reusable); /* LMM(desc_make_reusable:A) */
487 * Given a data ring (text or dict), put the associated descriptor of each
488 * data block from @lpos_begin until @lpos_end into the reusable state.
490 * If there is any problem making the associated descriptor reusable, either
491 * the descriptor has not yet been committed or another writer context has
492 * already pushed the tail lpos past the problematic data block. Regardless,
493 * on error the caller can re-load the tail lpos to determine the situation.
495 static bool data_make_reusable(struct printk_ringbuffer *rb,
496 struct prb_data_ring *data_ring,
497 unsigned long lpos_begin,
498 unsigned long lpos_end,
499 unsigned long *lpos_out)
501 struct prb_desc_ring *desc_ring = &rb->desc_ring;
502 struct prb_data_blk_lpos *blk_lpos;
503 struct prb_data_block *blk;
504 enum desc_state d_state;
505 struct prb_desc desc;
509 * Using the provided @data_ring, point @blk_lpos to the correct
510 * blk_lpos within the local copy of the descriptor.
512 if (data_ring == &rb->text_data_ring)
513 blk_lpos = &desc.text_blk_lpos;
515 blk_lpos = &desc.dict_blk_lpos;
517 /* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
518 while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
519 blk = to_block(data_ring, lpos_begin);
522 * Load the block ID from the data block. This is a data race
523 * against a writer that may have newly reserved this data
524 * area. If the loaded value matches a valid descriptor ID,
525 * the blk_lpos of that descriptor will be checked to make
526 * sure it points back to this data block. If the check fails,
527 * the data area has been recycled by another writer.
529 id = blk->id; /* LMM(data_make_reusable:A) */
531 d_state = desc_read(desc_ring, id, &desc); /* LMM(data_make_reusable:B) */
540 * This data block is invalid if the descriptor
541 * does not point back to it.
543 if (blk_lpos->begin != lpos_begin)
545 desc_make_reusable(desc_ring, id);
549 * This data block is invalid if the descriptor
550 * does not point back to it.
552 if (blk_lpos->begin != lpos_begin)
557 /* Advance @lpos_begin to the next data block. */
558 lpos_begin = blk_lpos->next;
561 *lpos_out = lpos_begin;
566 * Advance the data ring tail to at least @lpos. This function puts
567 * descriptors into the reusable state if the tail is pushed beyond
568 * their associated data block.
570 static bool data_push_tail(struct printk_ringbuffer *rb,
571 struct prb_data_ring *data_ring,
574 unsigned long tail_lpos_new;
575 unsigned long tail_lpos;
576 unsigned long next_lpos;
578 /* If @lpos is from a data-less block, there is nothing to do. */
579 if (LPOS_DATALESS(lpos))
583 * Any descriptor states that have transitioned to reusable due to the
584 * data tail being pushed to this loaded value will be visible to this
585 * CPU. This pairs with data_push_tail:D.
587 * Memory barrier involvement:
589 * If data_push_tail:A reads from data_push_tail:D, then this CPU can
590 * see desc_make_reusable:A.
594 * MB from desc_make_reusable:A to data_push_tail:D
596 * READFROM from data_push_tail:D to data_push_tail:A
598 * READFROM from desc_make_reusable:A to this CPU
600 tail_lpos = atomic_long_read(&data_ring->tail_lpos); /* LMM(data_push_tail:A) */
603 * Loop until the tail lpos is at or beyond @lpos. This condition
604 * may already be satisfied, resulting in no full memory barrier
605 * from data_push_tail:D being performed. However, since this CPU
606 * sees the new tail lpos, any descriptor states that transitioned to
607 * the reusable state must already be visible.
609 while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) {
611 * Make all descriptors reusable that are associated with
612 * data blocks before @lpos.
614 if (!data_make_reusable(rb, data_ring, tail_lpos, lpos,
617 * 1. Guarantee the block ID loaded in
618 * data_make_reusable() is performed before
619 * reloading the tail lpos. The failed
620 * data_make_reusable() may be due to a newly
621 * recycled data area causing the tail lpos to
622 * have been previously pushed. This pairs with
625 * Memory barrier involvement:
627 * If data_make_reusable:A reads from data_alloc:B,
628 * then data_push_tail:C reads from
633 * MB from data_push_tail:D to data_alloc:B
635 * RMB from data_make_reusable:A to
638 * Note: data_push_tail:D and data_alloc:B can be
639 * different CPUs. However, the data_alloc:B
640 * CPU (which performs the full memory
641 * barrier) must have previously seen
644 * 2. Guarantee the descriptor state loaded in
645 * data_make_reusable() is performed before
646 * reloading the tail lpos. The failed
647 * data_make_reusable() may be due to a newly
648 * recycled descriptor causing the tail lpos to
649 * have been previously pushed. This pairs with
652 * Memory barrier involvement:
654 * If data_make_reusable:B reads from
655 * desc_reserve:F, then data_push_tail:C reads
656 * from data_push_tail:D.
660 * MB from data_push_tail:D to desc_reserve:F
662 * RMB from data_make_reusable:B to
665 * Note: data_push_tail:D and desc_reserve:F can
666 * be different CPUs. However, the
667 * desc_reserve:F CPU (which performs the
668 * full memory barrier) must have previously
669 * seen data_push_tail:D.
671 smp_rmb(); /* LMM(data_push_tail:B) */
673 tail_lpos_new = atomic_long_read(&data_ring->tail_lpos
674 ); /* LMM(data_push_tail:C) */
675 if (tail_lpos_new == tail_lpos)
678 /* Another CPU pushed the tail. Try again. */
679 tail_lpos = tail_lpos_new;
684 * Guarantee any descriptor states that have transitioned to
685 * reusable are stored before pushing the tail lpos. A full
686 * memory barrier is needed since other CPUs may have made
687 * the descriptor states reusable. This pairs with
690 if (atomic_long_try_cmpxchg(&data_ring->tail_lpos, &tail_lpos,
691 next_lpos)) { /* LMM(data_push_tail:D) */
700 * Advance the desc ring tail. This function advances the tail by one
701 * descriptor, thus invalidating the oldest descriptor. Before advancing
702 * the tail, the tail descriptor is made reusable and all data blocks up to
703 * and including the descriptor's data block are invalidated (i.e. the data
704 * ring tail is pushed past the data block of the descriptor being made
707 static bool desc_push_tail(struct printk_ringbuffer *rb,
708 unsigned long tail_id)
710 struct prb_desc_ring *desc_ring = &rb->desc_ring;
711 enum desc_state d_state;
712 struct prb_desc desc;
714 d_state = desc_read(desc_ring, tail_id, &desc);
719 * If the ID is exactly 1 wrap behind the expected, it is
720 * in the process of being reserved by another writer and
721 * must be considered reserved.
723 if (DESC_ID(atomic_long_read(&desc.state_var)) ==
724 DESC_ID_PREV_WRAP(desc_ring, tail_id)) {
729 * The ID has changed. Another writer must have pushed the
730 * tail and recycled the descriptor already. Success is
731 * returned because the caller is only interested in the
732 * specified tail being pushed, which it was.
738 desc_make_reusable(desc_ring, tail_id);
745 * Data blocks must be invalidated before their associated
746 * descriptor can be made available for recycling. Invalidating
747 * them later is not possible because there is no way to trust
748 * data blocks once their associated descriptor is gone.
751 if (!data_push_tail(rb, &rb->text_data_ring, desc.text_blk_lpos.next))
753 if (!data_push_tail(rb, &rb->dict_data_ring, desc.dict_blk_lpos.next))
757 * Check the next descriptor after @tail_id before pushing the tail
758 * to it because the tail must always be in a committed or reusable
759 * state. The implementation of prb_first_seq() relies on this.
761 * A successful read implies that the next descriptor is less than or
762 * equal to @head_id so there is no risk of pushing the tail past the
765 d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc); /* LMM(desc_push_tail:A) */
767 if (d_state == desc_committed || d_state == desc_reusable) {
769 * Guarantee any descriptor states that have transitioned to
770 * reusable are stored before pushing the tail ID. This allows
771 * verifying the recycled descriptor state. A full memory
772 * barrier is needed since other CPUs may have made the
773 * descriptor states reusable. This pairs with desc_reserve:D.
775 atomic_long_cmpxchg(&desc_ring->tail_id, tail_id,
776 DESC_ID(tail_id + 1)); /* LMM(desc_push_tail:B) */
779 * Guarantee the last state load from desc_read() is before
780 * reloading @tail_id in order to see a new tail ID in the
781 * case that the descriptor has been recycled. This pairs
782 * with desc_reserve:D.
784 * Memory barrier involvement:
786 * If desc_push_tail:A reads from desc_reserve:F, then
787 * desc_push_tail:D reads from desc_push_tail:B.
791 * MB from desc_push_tail:B to desc_reserve:F
793 * RMB from desc_push_tail:A to desc_push_tail:D
795 * Note: desc_push_tail:B and desc_reserve:F can be different
796 * CPUs. However, the desc_reserve:F CPU (which performs
797 * the full memory barrier) must have previously seen
800 smp_rmb(); /* LMM(desc_push_tail:C) */
803 * Re-check the tail ID. The descriptor following @tail_id is
804 * not in an allowed tail state. But if the tail has since
805 * been moved by another CPU, then it does not matter.
807 if (atomic_long_read(&desc_ring->tail_id) == tail_id) /* LMM(desc_push_tail:D) */
814 /* Reserve a new descriptor, invalidating the oldest if necessary. */
815 static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
817 struct prb_desc_ring *desc_ring = &rb->desc_ring;
818 unsigned long prev_state_val;
819 unsigned long id_prev_wrap;
820 struct prb_desc *desc;
821 unsigned long head_id;
824 head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */
827 desc = to_desc(desc_ring, head_id);
829 id = DESC_ID(head_id + 1);
830 id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id);
833 * Guarantee the head ID is read before reading the tail ID.
834 * Since the tail ID is updated before the head ID, this
835 * guarantees that @id_prev_wrap is never ahead of the tail
836 * ID. This pairs with desc_reserve:D.
838 * Memory barrier involvement:
840 * If desc_reserve:A reads from desc_reserve:D, then
841 * desc_reserve:C reads from desc_push_tail:B.
845 * MB from desc_push_tail:B to desc_reserve:D
847 * RMB from desc_reserve:A to desc_reserve:C
849 * Note: desc_push_tail:B and desc_reserve:D can be different
850 * CPUs. However, the desc_reserve:D CPU (which performs
851 * the full memory barrier) must have previously seen
854 smp_rmb(); /* LMM(desc_reserve:B) */
856 if (id_prev_wrap == atomic_long_read(&desc_ring->tail_id
857 )) { /* LMM(desc_reserve:C) */
859 * Make space for the new descriptor by
860 * advancing the tail.
862 if (!desc_push_tail(rb, id_prev_wrap))
867 * 1. Guarantee the tail ID is read before validating the
868 * recycled descriptor state. A read memory barrier is
869 * sufficient for this. This pairs with desc_push_tail:B.
871 * Memory barrier involvement:
873 * If desc_reserve:C reads from desc_push_tail:B, then
874 * desc_reserve:E reads from desc_make_reusable:A.
878 * MB from desc_make_reusable:A to desc_push_tail:B
880 * RMB from desc_reserve:C to desc_reserve:E
882 * Note: desc_make_reusable:A and desc_push_tail:B can be
883 * different CPUs. However, the desc_push_tail:B CPU
884 * (which performs the full memory barrier) must have
885 * previously seen desc_make_reusable:A.
887 * 2. Guarantee the tail ID is stored before storing the head
888 * ID. This pairs with desc_reserve:B.
890 * 3. Guarantee any data ring tail changes are stored before
891 * recycling the descriptor. Data ring tail changes can
892 * happen via desc_push_tail()->data_push_tail(). A full
893 * memory barrier is needed since another CPU may have
894 * pushed the data ring tails. This pairs with
897 * 4. Guarantee a new tail ID is stored before recycling the
898 * descriptor. A full memory barrier is needed since
899 * another CPU may have pushed the tail ID. This pairs
900 * with desc_push_tail:C and this also pairs with
903 } while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id,
904 id)); /* LMM(desc_reserve:D) */
906 desc = to_desc(desc_ring, id);
909 * If the descriptor has been recycled, verify the old state val.
910 * See "ABA Issues" about why this verification is performed.
912 prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */
913 if (prev_state_val &&
914 prev_state_val != (id_prev_wrap | DESC_COMMITTED_MASK | DESC_REUSE_MASK)) {
920 * Assign the descriptor a new ID and set its state to reserved.
921 * See "ABA Issues" about why cmpxchg() instead of set() is used.
923 * Guarantee the new descriptor ID and state is stored before making
924 * any other changes. A write memory barrier is sufficient for this.
925 * This pairs with desc_read:D.
927 if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val,
928 id | 0)) { /* LMM(desc_reserve:F) */
933 /* Now data in @desc can be modified: LMM(desc_reserve:G) */
939 /* Determine the end of a data block. */
940 static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
941 unsigned long lpos, unsigned int size)
943 unsigned long begin_lpos;
944 unsigned long next_lpos;
947 next_lpos = lpos + size;
949 /* First check if the data block does not wrap. */
950 if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos))
953 /* Wrapping data blocks store their data at the beginning. */
954 return (DATA_THIS_WRAP_START_LPOS(data_ring, next_lpos) + size);
958 * Allocate a new data block, invalidating the oldest data block(s)
959 * if necessary. This function also associates the data block with
960 * a specified descriptor.
962 static char *data_alloc(struct printk_ringbuffer *rb,
963 struct prb_data_ring *data_ring, unsigned int size,
964 struct prb_data_blk_lpos *blk_lpos, unsigned long id)
966 struct prb_data_block *blk;
967 unsigned long begin_lpos;
968 unsigned long next_lpos;
	if (size == 0) {
		/* Specify a data-less block. */
		blk_lpos->begin = NO_LPOS;
		blk_lpos->next = NO_LPOS;
		return NULL;
	}
977 size = to_blk_size(size);
979 begin_lpos = atomic_long_read(&data_ring->head_lpos);
982 next_lpos = get_next_lpos(data_ring, begin_lpos, size);
984 if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring))) {
985 /* Failed to allocate, specify a data-less block. */
986 blk_lpos->begin = FAILED_LPOS;
987 blk_lpos->next = FAILED_LPOS;
992 * 1. Guarantee any descriptor states that have transitioned
993 * to reusable are stored before modifying the newly
994 * allocated data area. A full memory barrier is needed
995 * since other CPUs may have made the descriptor states
996 * reusable. See data_push_tail:A about why the reusable
997 * states are visible. This pairs with desc_read:D.
999 * 2. Guarantee any updated tail lpos is stored before
1000 * modifying the newly allocated data area. Another CPU may
1001 * be in data_make_reusable() and is reading a block ID
1002 * from this area. data_make_reusable() can handle reading
1003 * a garbage block ID value, but then it must be able to
1004 * load a new tail lpos. A full memory barrier is needed
1005 * since other CPUs may have updated the tail lpos. This
1006 * pairs with data_push_tail:B.
1008 } while (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &begin_lpos,
1009 next_lpos)); /* LMM(data_alloc:A) */
1011 blk = to_block(data_ring, begin_lpos);
1012 blk->id = id; /* LMM(data_alloc:B) */
1014 if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
1015 /* Wrapping data blocks store their data at the beginning. */
1016 blk = to_block(data_ring, 0);
1019 * Store the ID on the wrapped block for consistency.
1020 * The printk_ringbuffer does not actually use it.
1025 blk_lpos->begin = begin_lpos;
1026 blk_lpos->next = next_lpos;
1028 return &blk->data[0];
1031 /* Return the number of bytes used by a data block. */
1032 static unsigned int space_used(struct prb_data_ring *data_ring,
1033 struct prb_data_blk_lpos *blk_lpos)
1035 /* Data-less blocks take no space. */
1036 if (LPOS_DATALESS(blk_lpos->begin))
1039 if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
1040 /* Data block does not wrap. */
1041 return (DATA_INDEX(data_ring, blk_lpos->next) -
1042 DATA_INDEX(data_ring, blk_lpos->begin));
1046 * For wrapping data blocks, the trailing (wasted) space is
1049 return (DATA_INDEX(data_ring, blk_lpos->next) +
1050 DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin));
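
/*
 * For example (illustrative sizes): with a 1 KiB data ring, a wrapping
 * data block with @begin at index 1000 and @next at index 100 uses
 * 100 + 1024 - 1000 = 124 bytes: the 24 unused bytes at the end of the
 * byte array plus the 100 bytes occupied at the start of the next wrap.
 */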
1054 * prb_reserve() - Reserve space in the ringbuffer.
1056 * @e: The entry structure to setup.
1057 * @rb: The ringbuffer to reserve data in.
1058 * @r: The record structure to allocate buffers for.
1060 * This is the public function available to writers to reserve data.
1062 * The writer specifies the text and dict sizes to reserve by setting the
1063 * @text_buf_size and @dict_buf_size fields of @r, respectively. Dictionaries
1064 * are optional, so @dict_buf_size is allowed to be 0. To ensure proper
1065 * initialization of @r, prb_rec_init_wr() should be used.
1067 * Context: Any context. Disables local interrupts on success.
1068 * Return: true if at least text data could be allocated, otherwise false.
1070 * On success, the fields @info, @text_buf, @dict_buf of @r will be set by
1071 * this function and should be filled in by the writer before committing. Also
1072 * on success, prb_record_text_space() can be used on @e to query the actual
1073 * space used for the text data block.
1075 * If the function fails to reserve dictionary space (but all else succeeded),
1076 * it will still report success. In that case @dict_buf is set to NULL and
1077 * @dict_buf_size is set to 0. Writers must check this before writing to
1080 * @info->text_len and @info->dict_len will already be set to @text_buf_size
1081 * and @dict_buf_size, respectively. If dictionary space reservation fails,
1082 * @info->dict_len is set to 0.
1084 bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
1085 struct printk_record *r)
1087 struct prb_desc_ring *desc_ring = &rb->desc_ring;
1091 if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
1094 if (!data_check_size(&rb->dict_data_ring, r->dict_buf_size))
1098 * Descriptors in the reserved state act as blockers to all further
1099 * reservations once the desc_ring has fully wrapped. Disable
1100 * interrupts during the reserve/commit window in order to minimize
1101 * the likelihood of this happening.
1103 local_irq_save(e->irqflags);
1105 if (!desc_reserve(rb, &id)) {
1106 /* Descriptor reservation failures are tracked. */
1107 atomic_long_inc(&rb->fail);
1108 local_irq_restore(e->irqflags);
1112 d = to_desc(desc_ring, id);
1115 * Set the @e fields here so that prb_commit() can be used if
1116 * text data allocation fails.
1122 * Initialize the sequence number if it has "never been set".
1123 * Otherwise just increment it by a full wrap.
1125 * @seq is considered "never been set" if it has a value of 0,
1126 * _except_ for @descs[0], which was specially setup by the ringbuffer
1127 * initializer and therefore is always considered as set.
1129 * See the "Bootstrap" comment block in printk_ringbuffer.h for
1130 * details about how the initializer bootstraps the descriptors.
1132 if (d->info.seq == 0 && DESC_INDEX(desc_ring, id) != 0)
		d->info.seq = DESC_INDEX(desc_ring, id);
	else
		d->info.seq += DESCS_COUNT(desc_ring);
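	/*
	 * For example, with 2^15 descriptors the descriptor at index 5
	 * carries seq 5 on its first use, then 32773, then 65541, and so
	 * on: each recycling of a descriptor advances its seq by exactly
	 * one full wrap of the descriptor ring.
	 */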
1137 r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
1138 &d->text_blk_lpos, id);
1139 /* If text data allocation fails, a data-less record is committed. */
1140 if (r->text_buf_size && !r->text_buf) {
1141 d->info.text_len = 0;
1142 d->info.dict_len = 0;
		prb_commit(e);
		/* prb_commit() re-enabled interrupts. */
		goto fail;
	}
1148 r->dict_buf = data_alloc(rb, &rb->dict_data_ring, r->dict_buf_size,
1149 &d->dict_blk_lpos, id);
1151 * If dict data allocation fails, the caller can still commit
1152 * text. But dictionary information will not be available.
1154 if (r->dict_buf_size && !r->dict_buf)
1155 r->dict_buf_size = 0;
1159 /* Set default values for the sizes. */
1160 d->info.text_len = r->text_buf_size;
1161 d->info.dict_len = r->dict_buf_size;
1163 /* Record full text space used by record. */
1164 e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
1168 /* Make it clear to the caller that the reserve failed. */
1169 memset(r, 0, sizeof(*r));
1174 * prb_commit() - Commit (previously reserved) data to the ringbuffer.
1176 * @e: The entry containing the reserved data information.
1178 * This is the public function available to writers to commit data.
1180 * Context: Any context. Enables local interrupts.
1182 void prb_commit(struct prb_reserved_entry *e)
1184 struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
1185 struct prb_desc *d = to_desc(desc_ring, e->id);
1186 unsigned long prev_state_val = e->id | 0;
1188 /* Now the writer has finished all writing: LMM(prb_commit:A) */
1191 * Set the descriptor as committed. See "ABA Issues" about why
1192 * cmpxchg() instead of set() is used.
1194 * Guarantee all record data is stored before the descriptor state
1195 * is stored as committed. A write memory barrier is sufficient for
1196 * this. This pairs with desc_read:B.
1198 if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
1199 e->id | DESC_COMMITTED_MASK)) { /* LMM(prb_commit:B) */
1203 /* Restore interrupts, the reserve/commit window is finished. */
1204 local_irq_restore(e->irqflags);
1208 * Given @blk_lpos, return a pointer to the writer data from the data block
1209 * and calculate the size of the data part. A NULL pointer is returned if
1210 * @blk_lpos specifies values that could never be legal.
1212 * This function (used by readers) performs strict validation on the lpos
1213 * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
1214 * triggered if an internal error is detected.
1216 static const char *get_data(struct prb_data_ring *data_ring,
1217 struct prb_data_blk_lpos *blk_lpos,
1218 unsigned int *data_size)
1220 struct prb_data_block *db;
1222 /* Data-less data block description. */
1223 if (LPOS_DATALESS(blk_lpos->begin) && LPOS_DATALESS(blk_lpos->next)) {
1224 if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
1231 /* Regular data block: @begin less than @next and in same wrap. */
1232 if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
1233 blk_lpos->begin < blk_lpos->next) {
1234 db = to_block(data_ring, blk_lpos->begin);
1235 *data_size = blk_lpos->next - blk_lpos->begin;
1237 /* Wrapping data block: @begin is one wrap behind @next. */
1238 } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
1239 DATA_WRAPS(data_ring, blk_lpos->next)) {
1240 db = to_block(data_ring, 0);
1241 *data_size = DATA_INDEX(data_ring, blk_lpos->next);
1243 /* Illegal block description. */
1249 /* A valid data block will always be aligned to the ID size. */
1250 if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
1251 WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
1255 /* A valid data block will always have at least an ID. */
1256 if (WARN_ON_ONCE(*data_size < sizeof(db->id)))
1259 /* Subtract block ID space from size to reflect data size. */
1260 *data_size -= sizeof(db->id);
1262 return &db->data[0];
1266 * Count the number of lines in provided text. All text has at least 1 line
1267 * (even if @text_size is 0). Each '\n' processed is counted as an additional
1270 static unsigned int count_lines(const char *text, unsigned int text_size)
1272 unsigned int next_size = text_size;
1273 unsigned int line_count = 1;
1274 const char *next = text;
1277 next = memchr(next, '\n', next_size);
1282 next_size = text_size - (next - text);
1289 * Given @blk_lpos, copy an expected @len of data into the provided buffer.
1290 * If @line_count is provided, count the number of lines in the data.
1292 * This function (used by readers) performs strict validation on the data
1293 * size to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
1294 * triggered if an internal error is detected.
1296 static bool copy_data(struct prb_data_ring *data_ring,
1297 struct prb_data_blk_lpos *blk_lpos, u16 len, char *buf,
1298 unsigned int buf_size, unsigned int *line_count)
1300 unsigned int data_size;
1303 /* Caller might not want any data. */
1304 if ((!buf || !buf_size) && !line_count)
1307 data = get_data(data_ring, blk_lpos, &data_size);
1312 * Actual cannot be less than expected. It can be more than expected
1313 * because of the trailing alignment padding.
1315 if (WARN_ON_ONCE(data_size < (unsigned int)len)) {
1316 pr_warn_once("wrong data size (%u, expecting %hu) for data: %.*s\n",
1317 data_size, len, data_size, data);
1321 /* Caller interested in the line count? */
1323 *line_count = count_lines(data, data_size);
1325 /* Caller interested in the data content? */
1326 if (!buf || !buf_size)
1329 data_size = min_t(u16, buf_size, len);
1331 memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
1336 * This is an extended version of desc_read(). It gets a copy of a specified
1337 * descriptor. However, it also verifies that the record is committed and has
1338 * the sequence number @seq. On success, 0 is returned.
1340 * Error return values:
1341 * -EINVAL: A committed record with sequence number @seq does not exist.
1342 * -ENOENT: A committed record with sequence number @seq exists, but its data
1343 * is not available. This is a valid record, so readers should
1344 * continue with the next record.
1346 static int desc_read_committed_seq(struct prb_desc_ring *desc_ring,
1347 unsigned long id, u64 seq,
1348 struct prb_desc *desc_out)
1350 struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos;
1351 enum desc_state d_state;
1353 d_state = desc_read(desc_ring, id, desc_out);
1356 * An unexpected @id (desc_miss) or @seq mismatch means the record
1357 * does not exist. A descriptor in the reserved state means the
1358 * record does not yet exist for the reader.
1360 if (d_state == desc_miss ||
1361 d_state == desc_reserved ||
1362 desc_out->info.seq != seq) {
1367 * A descriptor in the reusable state may no longer have its data
1368 * available; report it as existing but with lost data. Or the record
1369 * may actually be a record with lost data.
1371 if (d_state == desc_reusable ||
1372 (blk_lpos->begin == FAILED_LPOS && blk_lpos->next == FAILED_LPOS)) {
1380 * Copy the ringbuffer data from the record with @seq to the provided
1381 * @r buffer. On success, 0 is returned.
1383 * See desc_read_committed_seq() for error return values.
1385 static int prb_read(struct printk_ringbuffer *rb, u64 seq,
1386 struct printk_record *r, unsigned int *line_count)
1388 struct prb_desc_ring *desc_ring = &rb->desc_ring;
1389 struct prb_desc *rdesc = to_desc(desc_ring, seq);
1390 atomic_long_t *state_var = &rdesc->state_var;
1391 struct prb_desc desc;
1395 /* Extract the ID, used to specify the descriptor to read. */
1396 id = DESC_ID(atomic_long_read(state_var));
1398 /* Get a local copy of the correct descriptor (if available). */
1399 err = desc_read_committed_seq(desc_ring, id, seq, &desc);
1402 * If @r is NULL, the caller is only interested in the availability
1408 /* If requested, copy meta data. */
1410 memcpy(r->info, &desc.info, sizeof(*(r->info)));
1412 /* Copy text data. If it fails, this is a data-less record. */
1413 if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, desc.info.text_len,
1414 r->text_buf, r->text_buf_size, line_count)) {
1419 * Copy dict data. Although this should not fail, dict data is not
1420 * important. So if it fails, modify the copied meta data to report
1421 * that there is no dict data, thus silently dropping the dict data.
1423 if (!copy_data(&rb->dict_data_ring, &desc.dict_blk_lpos, desc.info.dict_len,
1424 r->dict_buf, r->dict_buf_size, NULL)) {
1426 r->info->dict_len = 0;
1429 /* Ensure the record is still committed and has the same @seq. */
1430 return desc_read_committed_seq(desc_ring, id, seq, &desc);
1433 /* Get the sequence number of the tail descriptor. */
1434 static u64 prb_first_seq(struct printk_ringbuffer *rb)
1436 struct prb_desc_ring *desc_ring = &rb->desc_ring;
1437 enum desc_state d_state;
1438 struct prb_desc desc;
1442 id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */
1444 d_state = desc_read(desc_ring, id, &desc); /* LMM(prb_first_seq:B) */
1447 * This loop will not be infinite because the tail is
1448 * _always_ in the committed or reusable state.
1450 if (d_state == desc_committed || d_state == desc_reusable)
1454 * Guarantee the last state load from desc_read() is before
1455 * reloading @tail_id in order to see a new tail in the case
1456 * that the descriptor has been recycled. This pairs with
1459 * Memory barrier involvement:
1461 * If prb_first_seq:B reads from desc_reserve:F, then
1462 * prb_first_seq:A reads from desc_push_tail:B.
1466 * MB from desc_push_tail:B to desc_reserve:F
 * RMB from prb_first_seq:B to prb_first_seq:A
1470 smp_rmb(); /* LMM(prb_first_seq:C) */
1473 return desc.info.seq;
1477 * Non-blocking read of a record. Updates @seq to the last committed record
1478 * (which may have no data).
1480 * See the description of prb_read_valid() and prb_read_valid_info()
1483 static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
1484 struct printk_record *r, unsigned int *line_count)
1489 while ((err = prb_read(rb, *seq, r, line_count))) {
1490 tail_seq = prb_first_seq(rb);
1492 if (*seq < tail_seq) {
1494 * Behind the tail. Catch up and try again. This
1495 * can happen for -ENOENT and -EINVAL cases.
1499 } else if (err == -ENOENT) {
			/* Record exists, but no data available. Skip. */
			(*seq)++;
			/* Non-existent/non-committed record. Must stop. */
			return false;
1513 * prb_read_valid() - Non-blocking read of a requested record or (if gone)
1514 * the next available record.
1516 * @rb: The ringbuffer to read from.
1517 * @seq: The sequence number of the record to read.
1518 * @r: A record data buffer to store the read record to.
1520 * This is the public function available to readers to read a record.
1522 * The reader provides the @info, @text_buf, @dict_buf buffers of @r to be
1523 * filled in. Any of the buffer pointers can be set to NULL if the reader
1524 * is not interested in that data. To ensure proper initialization of @r,
1525 * prb_rec_init_rd() should be used.
1527 * Context: Any context.
1528 * Return: true if a record was read, otherwise false.
1530 * On success, the reader must check r->info.seq to see which record was
1531 * actually read. This allows the reader to detect dropped records.
1533 * Failure means @seq refers to a not yet written record.
1535 bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
1536 struct printk_record *r)
1538 return _prb_read_valid(rb, &seq, r, NULL);
1542 * prb_read_valid_info() - Non-blocking read of meta data for a requested
1543 * record or (if gone) the next available record.
1545 * @rb: The ringbuffer to read from.
1546 * @seq: The sequence number of the record to read.
1547 * @info: A buffer to store the read record meta data to.
1548 * @line_count: A buffer to store the number of lines in the record text.
1550 * This is the public function available to readers to read only the
1551 * meta data of a record.
1553 * The reader provides the @info, @line_count buffers to be filled in.
1554 * Either of the buffer pointers can be set to NULL if the reader is not
1555 * interested in that data.
1557 * Context: Any context.
1558 * Return: true if a record's meta data was read, otherwise false.
1560 * On success, the reader must check info->seq to see which record meta data
1561 * was actually read. This allows the reader to detect dropped records.
1563 * Failure means @seq refers to a not yet written record.
1565 bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
1566 struct printk_info *info, unsigned int *line_count)
1568 struct printk_record r;
1570 prb_rec_init_rd(&r, info, NULL, 0, NULL, 0);
1572 return _prb_read_valid(rb, &seq, &r, line_count);
1576 * prb_first_valid_seq() - Get the sequence number of the oldest available
1579 * @rb: The ringbuffer to get the sequence number from.
1581 * This is the public function available to readers to see what the
1582 * first/oldest valid sequence number is.
1584 * This provides readers a starting point to begin iterating the ringbuffer.
1586 * Context: Any context.
1587 * Return: The sequence number of the first/oldest record or, if the
1588 * ringbuffer is empty, 0 is returned.
1590 u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
	u64 seq = 0;

	if (!_prb_read_valid(rb, &seq, NULL, NULL))
		return 0;

	return seq;
1601 * prb_next_seq() - Get the sequence number after the last available record.
1603 * @rb: The ringbuffer to get the sequence number from.
1605 * This is the public function available to readers to see what the next
1606 * newest sequence number available to readers will be.
1608 * This provides readers a sequence number to jump to if all currently
1609 * available records should be skipped.
1611 * Context: Any context.
1612 * Return: The sequence number of the next newest (not yet available) record
1615 u64 prb_next_seq(struct printk_ringbuffer *rb)
	u64 seq = 0;

	/* Search forward from the oldest descriptor. */
	while (_prb_read_valid(rb, &seq, NULL, NULL))
		seq++;

	return seq;
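
/*
 * Usage sketch (illustrative only; my_wait_for_new_records() stands in for
 * whatever wait mechanism a reader has available): a reader can use
 * prb_next_seq() to skip all currently available records and poll for the
 * next one to appear::
 *
 *	struct printk_info info;
 *	struct printk_record r;
 *	char text_buf[32];
 *	u64 seq;
 *
 *	prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf), NULL, 0);
 *
 *	seq = prb_next_seq(&test_rb);
 *	while (!prb_read_valid(&test_rb, seq, &r))
 *		my_wait_for_new_records();
 */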
1627 * prb_init() - Initialize a ringbuffer to use provided external buffers.
1629 * @rb: The ringbuffer to initialize.
1630 * @text_buf: The data buffer for text data.
1631 * @textbits: The size of @text_buf as a power-of-2 value.
1632 * @dict_buf: The data buffer for dictionary data.
1633 * @dictbits: The size of @dict_buf as a power-of-2 value.
1634 * @descs: The descriptor buffer for ringbuffer records.
1635 * @descbits: The count of @descs items as a power-of-2 value.
1637 * This is the public function available to writers to setup a ringbuffer
1638 * during runtime using provided buffers.
1640 * This must match the initialization of DEFINE_PRINTKRB().
1642 * Context: Any context.
1644 void prb_init(struct printk_ringbuffer *rb,
1645 char *text_buf, unsigned int textbits,
1646 char *dict_buf, unsigned int dictbits,
1647 struct prb_desc *descs, unsigned int descbits)
1649 memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0]));
1651 rb->desc_ring.count_bits = descbits;
1652 rb->desc_ring.descs = descs;
1653 atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
1654 atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
1656 rb->text_data_ring.size_bits = textbits;
1657 rb->text_data_ring.data = text_buf;
1658 atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits));
1659 atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits));
1661 rb->dict_data_ring.size_bits = dictbits;
1662 rb->dict_data_ring.data = dict_buf;
1663 atomic_long_set(&rb->dict_data_ring.head_lpos, BLK0_LPOS(dictbits));
1664 atomic_long_set(&rb->dict_data_ring.tail_lpos, BLK0_LPOS(dictbits));
1666 atomic_long_set(&rb->fail, 0);
1668 descs[0].info.seq = -(u64)_DESCS_COUNT(descbits);
1670 descs[_DESCS_COUNT(descbits) - 1].info.seq = 0;
1671 atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits));
1672 descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS;
1673 descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS;
1674 descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.begin = FAILED_LPOS;
1675 descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.next = FAILED_LPOS;
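
/*
 * A setup sketch (buffer names and sizes are hypothetical): providing
 * statically allocated buffers for a ringbuffer initialized at runtime,
 * matching the geometry of DEFINE_PRINTKRB(test_rb, 15, 5, 3)::
 *
 *	static char setup_text[1 << (15 + 5)];
 *	static char setup_dict[1 << (15 + 3)];
 *	static struct prb_desc setup_descs[1 << 15];
 *	static struct printk_ringbuffer setup_rb;
 *
 *	prb_init(&setup_rb, &setup_text[0], 15 + 5, &setup_dict[0], 15 + 3,
 *		 &setup_descs[0], 15);
 */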
1679 * prb_record_text_space() - Query the full actual used ringbuffer space for
1680 * the text data of a reserved entry.
1682 * @e: The successfully reserved entry to query.
1684 * This is the public function available to writers to see how much actual
1685 * space is used in the ringbuffer to store the text data of the specified
1688 * This function is only valid if @e has been successfully reserved using
1691 * Context: Any context.
1692 * Return: The size in bytes used by the text data of the associated record.
1694 unsigned int prb_record_text_space(struct prb_reserved_entry *e)
1696 return e->text_space;