/* ringbuffer.h, revision 26fa459c */
/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Sliding window over the input data. */

#ifndef BROTLI_ENC_RINGBUFFER_H_
#define BROTLI_ENC_RINGBUFFER_H_

#include <string.h>  /* memcpy */

#include "../common/platform.h"
#include <brotli/types.h>
#include "./memory.h"
#include "./quality.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/* A RingBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
   data in a circular manner: writing a byte writes it to:
     `position() % (1 << window_bits)'.
   For convenience, the RingBuffer array contains another copy of the
   first `1 << tail_bits' bytes:
     buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
   and another copy of the last two bytes:
     buffer_[-1] == buffer_[(1 << window_bits) - 1] and
     buffer_[-2] == buffer_[(1 << window_bits) - 2]. */
typedef struct RingBuffer {
  /* Size of the ring-buffer is (1 << window_bits) + tail_size_. */
  const uint32_t size_;
  const uint32_t mask_;
  const uint32_t tail_size_;
  const uint32_t total_size_;

  uint32_t cur_size_;
  /* Position to write in the ring buffer. */
  uint32_t pos_;
  /* The actual ring buffer containing the copy of the last two bytes, the data,
     and the copy of the beginning as a tail. */
  uint8_t* data_;
  /* The start of the ring-buffer. */
  uint8_t* buffer_;
} RingBuffer;
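
/* As a concrete illustration of the invariants above (a hypothetical checker,
   not part of the encoder API): once the ring buffer has wrapped at least
   once, the tail and the two bytes before buffer_ mirror the ring exactly. */
#if 0
static BROTLI_BOOL RingBufferInvariantsHold(const RingBuffer* rb) {
  uint32_t i;
  for (i = 0; i < rb->tail_size_; ++i) {
    if (rb->buffer_[i] != rb->buffer_[i + rb->size_]) return BROTLI_FALSE;
  }
  return TO_BROTLI_BOOL(rb->buffer_[-1] == rb->buffer_[rb->size_ - 1] &&
                        rb->buffer_[-2] == rb->buffer_[rb->size_ - 2]);
}
#endif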

static BROTLI_INLINE void RingBufferInit(RingBuffer* rb) {
  rb->cur_size_ = 0;
  rb->pos_ = 0;
  rb->data_ = 0;
  rb->buffer_ = 0;
}

static BROTLI_INLINE void RingBufferSetup(
    const BrotliEncoderParams* params, RingBuffer* rb) {
  int window_bits = ComputeRbBits(params);
  int tail_bits = params->lgblock;
  /* These fields are logically const for the lifetime of the ring buffer;
     cast the const away to write them exactly once, here. */
  *(uint32_t*)&rb->size_ = 1u << window_bits;
  *(uint32_t*)&rb->mask_ = (1u << window_bits) - 1;
  *(uint32_t*)&rb->tail_size_ = 1u << tail_bits;
  *(uint32_t*)&rb->total_size_ = rb->size_ + rb->tail_size_;
}
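
/* For example, if ComputeRbBits(params) returns 23 and params->lgblock is 16,
   this sets size_ == 1 << 23 == 8388608, mask_ == 8388607,
   tail_size_ == 1 << 16 == 65536 and total_size_ == 8454144. */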

static BROTLI_INLINE void RingBufferFree(MemoryManager* m, RingBuffer* rb) {
  BROTLI_FREE(m, rb->data_);
}

/* Allocates or re-allocates data_ to the given length plus some slack
   region before and after. Fills the slack regions with zeros. */
static BROTLI_INLINE void RingBufferInitBuffer(
    MemoryManager* m, const uint32_t buflen, RingBuffer* rb) {
  static const size_t kSlackForEightByteHashingEverywhere = 7;
  uint8_t* new_data = BROTLI_ALLOC(
      m, uint8_t, 2 + buflen + kSlackForEightByteHashingEverywhere);
  size_t i;
  if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(new_data)) return;
  if (rb->data_) {
    memcpy(new_data, rb->data_,
        2 + rb->cur_size_ + kSlackForEightByteHashingEverywhere);
    BROTLI_FREE(m, rb->data_);
  }
  rb->data_ = new_data;
  rb->cur_size_ = buflen;
  rb->buffer_ = rb->data_ + 2;
  rb->buffer_[-2] = rb->buffer_[-1] = 0;
  for (i = 0; i < kSlackForEightByteHashingEverywhere; ++i) {
    rb->buffer_[rb->cur_size_ + i] = 0;
  }
}
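
/* The resulting layout of data_, as a reading of the code above:
     data_[0 .. 1]                   mirror of the last two ring bytes
     data_[2 .. buflen + 1]          the ring data; buffer_ == data_ + 2
     data_[buflen + 2 .. buflen + 8] seven slack bytes, kept zeroed
   The zeroed slack lets 8-byte hash loads at any position below cur_size_
   stay inside the allocation (hence kSlackForEightByteHashingEverywhere). */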

static BROTLI_INLINE void RingBufferWriteTail(
    const uint8_t* bytes, size_t n, RingBuffer* rb) {
  const size_t masked_pos = rb->pos_ & rb->mask_;
  if (BROTLI_PREDICT_FALSE(masked_pos < rb->tail_size_)) {
    /* Just fill the tail buffer with the beginning data. */
    const size_t p = rb->size_ + masked_pos;
    memcpy(&rb->buffer_[p], bytes,
        BROTLI_MIN(size_t, n, rb->tail_size_ - masked_pos));
  }
}
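
/* A made-up example of the mirroring: with size_ == 16 and tail_size_ == 4,
   a 3-byte write landing at masked_pos == 1 is also copied to
   buffer_[17..19], so a match that runs off the end of the ring can be read
   straight through buffer_[size_ .. size_ + tail_size_ - 1] without
   wrapping. */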

/* Push bytes into the ring buffer. */
static BROTLI_INLINE void RingBufferWrite(
    MemoryManager* m, const uint8_t* bytes, size_t n, RingBuffer* rb) {
  if (rb->pos_ == 0 && n < rb->tail_size_) {
    /* Special case for the first write: to process the first block, we don't
       need to allocate the whole ring-buffer and we don't need the tail
       either. However, we do this memory usage optimization only if the
       first write is less than the tail size, which is also the input block
       size; otherwise it is likely that other blocks will follow and we
       will need to reallocate to the full size anyway. */
    rb->pos_ = (uint32_t)n;
    RingBufferInitBuffer(m, rb->pos_, rb);
    if (BROTLI_IS_OOM(m)) return;
    memcpy(rb->buffer_, bytes, n);
    return;
  }
  if (rb->cur_size_ < rb->total_size_) {
    /* Lazily allocate the full buffer. */
    RingBufferInitBuffer(m, rb->total_size_, rb);
    if (BROTLI_IS_OOM(m)) return;
    /* Initialize the last two bytes to zero, so that we don't have to worry
       later when we mirror them into buffer_[-2] and buffer_[-1]. */
    rb->buffer_[rb->size_ - 2] = 0;
    rb->buffer_[rb->size_ - 1] = 0;
    /* Initialize tail; might be touched by "best_len++" optimization when
       ring buffer is "full". */
    rb->buffer_[rb->size_] = 241;
  }
  {
    const size_t masked_pos = rb->pos_ & rb->mask_;
    /* The length of the writes is limited so that we do not need to worry
       about a write wrapping around the ring buffer more than once. */
    RingBufferWriteTail(bytes, n, rb);
    if (BROTLI_PREDICT_TRUE(masked_pos + n <= rb->size_)) {
      /* A single write fits. */
      memcpy(&rb->buffer_[masked_pos], bytes, n);
    } else {
      /* Split into two writes.
         Copy into the end of the buffer, including the tail buffer. */
      memcpy(&rb->buffer_[masked_pos], bytes,
             BROTLI_MIN(size_t, n, rb->total_size_ - masked_pos));
      /* Copy into the beginning of the buffer. */
      memcpy(&rb->buffer_[0], bytes + (rb->size_ - masked_pos),
             n - (rb->size_ - masked_pos));
    }
  }
  {
    BROTLI_BOOL not_first_lap = (rb->pos_ & (1u << 31)) != 0;
    uint32_t rb_pos_mask = (1u << 31) - 1;
    rb->buffer_[-2] = rb->buffer_[rb->size_ - 2];
    rb->buffer_[-1] = rb->buffer_[rb->size_ - 1];
    rb->pos_ = (rb->pos_ & rb_pos_mask) + (uint32_t)(n & rb_pos_mask);
    if (not_first_lap) {
      /* Wrap, but preserve not-a-first-lap feature. */
      rb->pos_ |= 1u << 31;
    }
  }
}
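
/* A minimal usage sketch, under assumptions: params is fully initialized,
   each write is at most one input block (tail_size_ bytes), and this driver
   (UseRingBuffer) is hypothetical, not part of the encoder. */
#if 0
static void UseRingBuffer(MemoryManager* m, const BrotliEncoderParams* params,
                          const uint8_t* input, size_t input_size) {
  RingBuffer rb;
  size_t block = (size_t)1 << params->lgblock;  /* == tail_size_ */
  size_t pos = 0;
  RingBufferInit(&rb);
  RingBufferSetup(params, &rb);
  while (pos < input_size) {
    size_t n = BROTLI_MIN(size_t, block, input_size - pos);
    RingBufferWrite(m, input + pos, n, &rb);
    if (BROTLI_IS_OOM(m)) break;
    pos += n;
    /* The most recently written byte is rb.buffer_[(rb.pos_ - 1) & rb.mask_];
       the high bit of rb.pos_ only flags that the buffer has wrapped. */
  }
  RingBufferFree(m, &rb);
}
#endif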

#if defined(__cplusplus) || defined(c_plusplus)
}  /* extern "C" */
#endif

#endif  /* BROTLI_ENC_RINGBUFFER_H_ */