/proc/self/cwd/c/writer.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | Copyright 2020 Google LLC |
3 | | |
4 | | Use of this source code is governed by a BSD-style |
5 | | license that can be found in the LICENSE file or at |
6 | | https://developers.google.com/open-source/licenses/bsd |
7 | | */ |
8 | | |
9 | | #include "writer.h" |
10 | | |
11 | | #include "system.h" |
12 | | |
13 | | #include "block.h" |
14 | | #include "constants.h" |
15 | | #include "record.h" |
16 | | #include "reftable.h" |
17 | | #include "tree.h" |
18 | | |
19 | | static struct reftable_block_stats * |
20 | | writer_reftable_block_stats(struct reftable_writer *w, byte typ) |
21 | 675 | { |
22 | 675 | switch (typ) { |
23 | 675 | case 'r': |
24 | 456 | return &w->stats.ref_stats; |
25 | 675 | case 'o': |
26 | 19 | return &w->stats.obj_stats; |
27 | 675 | case 'i': |
28 | 23 | return &w->stats.idx_stats; |
29 | 675 | case 'g': |
30 | 177 | return &w->stats.log_stats; |
31 | 0 | } |
32 | 0 | assert(false); |
33 | 0 | return NULL; |
34 | 0 | } |
35 | | |
36 | | /* write data, queuing the padding for the next write. Returns negative for |
37 | | * error. */ |
38 | | static int padded_write(struct reftable_writer *w, byte *data, size_t len, |
39 | | int padding) |
40 | 627 | { |
41 | 627 | int n = 0; |
42 | 627 | if (w->pending_padding > 0) { |
43 | 81 | byte *zeroed = reftable_calloc(w->pending_padding); |
44 | 81 | int n = w->write(w->write_arg, zeroed, w->pending_padding); |
45 | 81 | if (n < 0) { |
46 | 0 | return n; |
47 | 0 | } |
48 | 81 | |
49 | 81 | w->pending_padding = 0; |
50 | 81 | reftable_free(zeroed); |
51 | 81 | } |
52 | 627 | |
53 | 627 | w->pending_padding = padding; |
54 | 627 | n = w->write(w->write_arg, data, len); |
55 | 627 | if (n < 0) { |
56 | 0 | return n; |
57 | 0 | } |
58 | 627 | n += padding; |
59 | 627 | return 0; |
60 | 627 | } |
61 | | |
62 | | static void options_set_defaults(struct reftable_write_options *opts) |
63 | 224 | { |
64 | 224 | if (opts->restart_interval == 0) { |
65 | 28 | opts->restart_interval = 16; |
66 | 28 | } |
67 | 224 | |
68 | 224 | if (opts->hash_id == 0) { |
69 | 12 | opts->hash_id = SHA1_ID; |
70 | 12 | } |
71 | 224 | if (opts->block_size == 0) { |
72 | 13 | opts->block_size = DEFAULT_BLOCK_SIZE; |
73 | 13 | } |
74 | 224 | } |
75 | | |
76 | | static int writer_version(struct reftable_writer *w) |
77 | 1.77k | { |
78 | 1.77k | return (w->opts.hash_id == 0 || w->opts.hash_id == SHA1_ID) ? 1 : 2; |
79 | 1.77k | } |
80 | | |
81 | | static int writer_write_header(struct reftable_writer *w, byte *dest) |
82 | 434 | { |
83 | 434 | memcpy((char *)dest, "REFT", 4); |
84 | 434 | |
85 | 434 | dest[4] = writer_version(w); |
86 | 434 | |
87 | 434 | put_be24(dest + 5, w->opts.block_size); |
88 | 434 | put_be64(dest + 8, w->min_update_index); |
89 | 434 | put_be64(dest + 16, w->max_update_index); |
90 | 434 | if (writer_version(w) == 2) { |
91 | 2 | put_be32(dest + 24, w->opts.hash_id); |
92 | 2 | } |
93 | 434 | return header_size(writer_version(w)); |
94 | 434 | } |
95 | | |
96 | | static void writer_reinit_block_writer(struct reftable_writer *w, byte typ) |
97 | 453 | { |
98 | 453 | int block_start = 0; |
99 | 453 | if (w->next == 0) { |
100 | 260 | block_start = header_size(writer_version(w)); |
101 | 260 | } |
102 | 453 | |
103 | 453 | block_writer_init(&w->block_writer_data, typ, w->block, |
104 | 453 | w->opts.block_size, block_start, |
105 | 453 | hash_size(w->opts.hash_id)); |
106 | 453 | w->block_writer = &w->block_writer_data; |
107 | 453 | w->block_writer->restart_interval = w->opts.restart_interval; |
108 | 453 | } |
109 | | |
110 | | struct reftable_writer * |
111 | | reftable_new_writer(int (*writer_func)(void *, byte *, size_t), |
112 | | void *writer_arg, struct reftable_write_options *opts) |
113 | 224 | { |
114 | 224 | struct reftable_writer *wp = |
115 | 224 | reftable_calloc(sizeof(struct reftable_writer)); |
116 | 224 | options_set_defaults(opts); |
117 | 224 | if (opts->block_size >= (1 << 24)) { |
118 | 0 | /* TODO - error return? */ |
119 | 0 | abort(); |
120 | 0 | } |
121 | 224 | wp->block = reftable_calloc(opts->block_size); |
122 | 224 | wp->write = writer_func; |
123 | 224 | wp->write_arg = writer_arg; |
124 | 224 | wp->opts = *opts; |
125 | 224 | writer_reinit_block_writer(wp, BLOCK_TYPE_REF); |
126 | 224 | |
127 | 224 | return wp; |
128 | 224 | } |
129 | | |
130 | | void reftable_writer_set_limits(struct reftable_writer *w, uint64_t min, |
131 | | uint64_t max) |
132 | 215 | { |
133 | 215 | w->min_update_index = min; |
134 | 215 | w->max_update_index = max; |
135 | 215 | } |
136 | | |
137 | | void reftable_writer_free(struct reftable_writer *w) |
138 | 224 | { |
139 | 224 | reftable_free(w->block); |
140 | 224 | reftable_free(w); |
141 | 224 | } |
142 | | |
/* Node in the object index tree: maps one object hash to the list of
 * block offsets where refs pointing at it were written. */
struct obj_index_tree_node {
	struct slice hash;	/* object ID owned by this node */
	uint64_t *offsets;	/* ascending block offsets; heap-owned */
	int offset_len;		/* valid entries in offsets */
	int offset_cap;		/* allocated capacity of offsets */
};
149 | | |
150 | | static int obj_index_tree_node_compare(const void *a, const void *b) |
151 | 13.7k | { |
152 | 13.7k | return slice_compare(((const struct obj_index_tree_node *)a)->hash, |
153 | 13.7k | ((const struct obj_index_tree_node *)b)->hash); |
154 | 13.7k | } |
155 | | |
156 | | static void writer_index_hash(struct reftable_writer *w, struct slice hash) |
157 | 462 | { |
158 | 462 | uint64_t off = w->next; |
159 | 462 | |
160 | 462 | struct obj_index_tree_node want = { .hash = hash }; |
161 | 462 | |
162 | 462 | struct tree_node *node = tree_search(&want, &w->obj_index_tree, |
163 | 462 | &obj_index_tree_node_compare, 0); |
164 | 462 | struct obj_index_tree_node *key = NULL; |
165 | 462 | if (node == NULL) { |
166 | 292 | key = reftable_calloc(sizeof(struct obj_index_tree_node)); |
167 | 292 | slice_copy(&key->hash, hash); |
168 | 292 | tree_search((void *)key, &w->obj_index_tree, |
169 | 292 | &obj_index_tree_node_compare, 1); |
170 | 292 | } else { |
171 | 170 | key = node->key; |
172 | 170 | } |
173 | 462 | |
174 | 462 | if (key->offset_len > 0 && key->offsets[key->offset_len - 1] == off) { |
175 | 102 | return; |
176 | 102 | } |
177 | 360 | |
178 | 360 | if (key->offset_len == key->offset_cap) { |
179 | 340 | key->offset_cap = 2 * key->offset_cap + 1; |
180 | 340 | key->offsets = reftable_realloc( |
181 | 340 | key->offsets, sizeof(uint64_t) * key->offset_cap); |
182 | 340 | } |
183 | 360 | |
184 | 360 | key->offsets[key->offset_len++] = off; |
185 | 360 | } |
186 | | |
187 | | static int writer_add_record(struct reftable_writer *w, struct record rec) |
188 | 1.27k | { |
189 | 1.27k | int result = -1; |
190 | 1.27k | struct slice key = { 0 }; |
191 | 1.27k | int err = 0; |
192 | 1.27k | record_key(rec, &key); |
193 | 1.27k | if (slice_compare(w->last_key, key) >= 0) { |
194 | 0 | goto exit; |
195 | 0 | } |
196 | 1.27k | |
197 | 1.27k | slice_copy(&w->last_key, key); |
198 | 1.27k | if (w->block_writer == NULL) { |
199 | 44 | writer_reinit_block_writer(w, record_type(rec)); |
200 | 44 | } |
201 | 1.27k | |
202 | 1.27k | assert(block_writer_type(w->block_writer) == record_type(rec)); |
203 | 1.27k | |
204 | 1.27k | if (block_writer_add(w->block_writer, rec) == 0) { |
205 | 1.12k | result = 0; |
206 | 1.12k | goto exit; |
207 | 1.12k | } |
208 | 150 | |
209 | 150 | err = writer_flush_block(w); |
210 | 150 | if (err < 0) { |
211 | 0 | result = err; |
212 | 0 | goto exit; |
213 | 0 | } |
214 | 150 | |
215 | 150 | writer_reinit_block_writer(w, record_type(rec)); |
216 | 150 | err = block_writer_add(w->block_writer, rec); |
217 | 150 | if (err < 0) { |
218 | 0 | result = err; |
219 | 0 | goto exit; |
220 | 0 | } |
221 | 150 | |
222 | 150 | result = 0; |
223 | 1.27k | exit: |
224 | 1.27k | slice_clear(&key); |
225 | 1.27k | return result; |
226 | 150 | } |
227 | | |
228 | | int reftable_writer_add_ref(struct reftable_writer *w, |
229 | | struct reftable_ref_record *ref) |
230 | 906 | { |
231 | 906 | struct record rec = { 0 }; |
232 | 906 | struct reftable_ref_record copy = *ref; |
233 | 906 | int err = 0; |
234 | 906 | |
235 | 906 | if (ref->ref_name == NULL) { |
236 | 0 | return REFTABLE_API_ERROR; |
237 | 0 | } |
238 | 906 | if (ref->update_index < w->min_update_index || |
239 | 906 | ref->update_index > w->max_update_index) { |
240 | 0 | return REFTABLE_API_ERROR; |
241 | 0 | } |
242 | 906 | |
243 | 906 | record_from_ref(&rec, ©); |
244 | 906 | copy.update_index -= w->min_update_index; |
245 | 906 | err = writer_add_record(w, rec); |
246 | 906 | if (err < 0) { |
247 | 0 | return err; |
248 | 0 | } |
249 | 906 | |
250 | 906 | if (!w->opts.skip_index_objects && ref->value != NULL) { |
251 | 362 | struct slice h = { |
252 | 362 | .buf = ref->value, |
253 | 362 | .len = hash_size(w->opts.hash_id), |
254 | 362 | }; |
255 | 362 | |
256 | 362 | writer_index_hash(w, h); |
257 | 362 | } |
258 | 906 | if (!w->opts.skip_index_objects && ref->target_value != NULL) { |
259 | 100 | struct slice h = { |
260 | 100 | .buf = ref->target_value, |
261 | 100 | .len = hash_size(w->opts.hash_id), |
262 | 100 | }; |
263 | 100 | writer_index_hash(w, h); |
264 | 100 | } |
265 | 906 | return 0; |
266 | 906 | } |
267 | | |
268 | | int reftable_writer_add_refs(struct reftable_writer *w, |
269 | | struct reftable_ref_record *refs, int n) |
270 | 0 | { |
271 | 0 | int err = 0; |
272 | 0 | int i = 0; |
273 | 0 | QSORT(refs, n, reftable_ref_record_compare_name); |
274 | 0 | for (i = 0; err == 0 && i < n; i++) { |
275 | 0 | err = reftable_writer_add_ref(w, &refs[i]); |
276 | 0 | } |
277 | 0 | return err; |
278 | 0 | } |
279 | | |
280 | | int reftable_writer_add_log(struct reftable_writer *w, |
281 | | struct reftable_log_record *log) |
282 | 373 | { |
283 | 373 | if (log->ref_name == NULL) { |
284 | 0 | return REFTABLE_API_ERROR; |
285 | 0 | } |
286 | 373 | |
287 | 373 | if (w->block_writer != NULL && |
288 | 373 | block_writer_type(w->block_writer) == BLOCK_TYPE_REF) { |
289 | 44 | int err = writer_finish_public_section(w); |
290 | 44 | if (err < 0) { |
291 | 0 | return err; |
292 | 0 | } |
293 | 373 | } |
294 | 373 | |
295 | 373 | w->next -= w->pending_padding; |
296 | 373 | w->pending_padding = 0; |
297 | 373 | |
298 | 373 | { |
299 | 373 | struct record rec = { 0 }; |
300 | 373 | int err; |
301 | 373 | record_from_log(&rec, log); |
302 | 373 | err = writer_add_record(w, rec); |
303 | 373 | return err; |
304 | 373 | } |
305 | 373 | } |
306 | | |
307 | | int reftable_writer_add_logs(struct reftable_writer *w, |
308 | | struct reftable_log_record *logs, int n) |
309 | 0 | { |
310 | 0 | int err = 0; |
311 | 0 | int i = 0; |
312 | 0 | QSORT(logs, n, reftable_log_record_compare_key); |
313 | 0 | for (i = 0; err == 0 && i < n; i++) { |
314 | 0 | err = reftable_writer_add_log(w, &logs[i]); |
315 | 0 | } |
316 | 0 | return err; |
317 | 0 | } |
318 | | |
/* Finish the current section: flush the data blocks, then repeatedly
 * collapse the accumulated index records into index blocks until the
 * top level fits in few enough records, and record index stats for the
 * section's block type. */
static int writer_finish_section(struct reftable_writer *w)
{
	byte typ = block_writer_type(w->block_writer);
	uint64_t index_start = 0;
	int max_level = 0;
	/* Small sections are cheap to scan; only build an index once the
	   record count exceeds this threshold. */
	int threshold = w->opts.unpadded ? 1 : 3;
	int before_blocks = w->stats.idx_stats.blocks;
	int err = writer_flush_block(w);
	int i = 0;
	if (err < 0) {
		return err;
	}

	/* Each pass writes one index level; flushing those blocks in turn
	   repopulates w->index with the next (smaller) level's records. */
	while (w->index_len > threshold) {
		struct index_record *idx = NULL;
		int idx_len = 0;

		max_level++;
		index_start = w->next;
		writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);

		/* Take ownership of the current level before the flushes
		   below start accumulating the next one. */
		idx = w->index;
		idx_len = w->index_len;

		w->index = NULL;
		w->index_len = 0;
		w->index_cap = 0;
		for (i = 0; i < idx_len; i++) {
			struct record rec = { 0 };
			record_from_index(&rec, idx + i);
			if (block_writer_add(w->block_writer, rec) == 0) {
				continue;
			}

			/* Block full: flush and retry on a fresh one. */
			{
				int err = writer_flush_block(w);
				if (err < 0) {
					return err;
				}
			}

			writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);

			err = block_writer_add(w->block_writer, rec);
			if (err != 0) {
				/* write into fresh block should always succeed
				 */
				abort();
			}
		}
		/* This level's key copies are no longer needed. */
		for (i = 0; i < idx_len; i++) {
			slice_clear(&idx[i].last_key);
		}
		reftable_free(idx);
	}

	/* Drop the remaining (sub-threshold) top-level records. */
	writer_clear_index(w);

	err = writer_flush_block(w);
	if (err < 0) {
		return err;
	}

	{
		struct reftable_block_stats *bstats =
			writer_reftable_block_stats(w, typ);
		bstats->index_blocks =
			w->stats.idx_stats.blocks - before_blocks;
		bstats->index_offset = index_start;
		bstats->max_index_level = max_level;
	}

	/* Reinit lastKey, as the next section can start with any key. */
	w->last_key.len = 0;

	return 0;
}
396 | | |
/* Accumulator for update_common(): tracks the longest common prefix
 * seen between adjacent hashes during an in-order tree walk. */
struct common_prefix_arg {
	struct slice *last;	/* hash from the previous callback, or NULL */
	int max;		/* longest common prefix observed so far */
};
401 | | |
402 | | static void update_common(void *void_arg, void *key) |
403 | 282 | { |
404 | 282 | struct common_prefix_arg *arg = (struct common_prefix_arg *)void_arg; |
405 | 282 | struct obj_index_tree_node *entry = (struct obj_index_tree_node *)key; |
406 | 282 | if (arg->last != NULL) { |
407 | 275 | int n = common_prefix_size(entry->hash, *arg->last); |
408 | 275 | if (n > arg->max) { |
409 | 0 | arg->max = n; |
410 | 0 | } |
411 | 275 | } |
412 | 282 | arg->last = &entry->hash; |
413 | 282 | } |
414 | | |
/* Closure for write_object_record(): carries the writer plus the first
 * error encountered, since tree callbacks cannot return one. */
struct write_record_arg {
	struct reftable_writer *w;
	int err;	/* sticky: once negative, later callbacks no-op */
};
419 | | |
/* Tree-walk callback: emit one object-index record. On a full block it
 * flushes and retries; if the record still does not fit, it is written
 * without its offset list (readers then fall back to scanning). */
static void write_object_record(void *void_arg, void *key)
{
	struct write_record_arg *arg = (struct write_record_arg *)void_arg;
	struct obj_index_tree_node *entry = (struct obj_index_tree_node *)key;
	struct obj_record obj_rec = {
		.hash_prefix = entry->hash.buf,
		.hash_prefix_len = arg->w->stats.object_id_len,
		.offsets = entry->offsets,
		.offset_len = entry->offset_len,
	};
	struct record rec = { 0 };
	/* A previous callback already failed; skip the rest of the walk. */
	if (arg->err < 0) {
		goto exit;
	}

	record_from_obj(&rec, &obj_rec);
	arg->err = block_writer_add(arg->w->block_writer, rec);
	if (arg->err == 0) {
		goto exit;
	}

	arg->err = writer_flush_block(arg->w);
	if (arg->err < 0) {
		goto exit;
	}

	writer_reinit_block_writer(arg->w, BLOCK_TYPE_OBJ);
	arg->err = block_writer_add(arg->w->block_writer, rec);
	if (arg->err == 0) {
		goto exit;
	}
	/* rec aliases obj_rec, so clearing the offset list here shrinks
	   the record for the retry below. */
	obj_rec.offset_len = 0;
	arg->err = block_writer_add(arg->w->block_writer, rec);

	/* Should be able to write into a fresh block. */
	assert(arg->err == 0);

exit:;
}
459 | | |
460 | | static void object_record_free(void *void_arg, void *key) |
461 | 292 | { |
462 | 292 | struct obj_index_tree_node *entry = (struct obj_index_tree_node *)key; |
463 | 292 | |
464 | 292 | FREE_AND_NULL(entry->offsets); |
465 | 292 | slice_clear(&entry->hash); |
466 | 292 | reftable_free(entry); |
467 | 292 | } |
468 | | |
469 | | static int writer_dump_object_index(struct reftable_writer *w) |
470 | 7 | { |
471 | 7 | struct write_record_arg closure = { .w = w }; |
472 | 7 | struct common_prefix_arg common = { 0 }; |
473 | 7 | if (w->obj_index_tree != NULL) { |
474 | 7 | infix_walk(w->obj_index_tree, &update_common, &common); |
475 | 7 | } |
476 | 7 | w->stats.object_id_len = common.max + 1; |
477 | 7 | |
478 | 7 | writer_reinit_block_writer(w, BLOCK_TYPE_OBJ); |
479 | 7 | |
480 | 7 | if (w->obj_index_tree != NULL) { |
481 | 7 | infix_walk(w->obj_index_tree, &write_object_record, &closure); |
482 | 7 | } |
483 | 7 | |
484 | 7 | if (closure.err < 0) { |
485 | 0 | return closure.err; |
486 | 0 | } |
487 | 7 | return writer_finish_section(w); |
488 | 7 | } |
489 | | |
490 | | int writer_finish_public_section(struct reftable_writer *w) |
491 | 261 | { |
492 | 261 | byte typ = 0; |
493 | 261 | int err = 0; |
494 | 261 | |
495 | 261 | if (w->block_writer == NULL) { |
496 | 0 | return 0; |
497 | 0 | } |
498 | 261 | |
499 | 261 | typ = block_writer_type(w->block_writer); |
500 | 261 | err = writer_finish_section(w); |
501 | 261 | if (err < 0) { |
502 | 0 | return err; |
503 | 0 | } |
504 | 261 | if (typ == BLOCK_TYPE_REF && !w->opts.skip_index_objects && |
505 | 261 | w->stats.ref_stats.index_blocks > 0) { |
506 | 7 | err = writer_dump_object_index(w); |
507 | 7 | if (err < 0) { |
508 | 0 | return err; |
509 | 0 | } |
510 | 261 | } |
511 | 261 | |
512 | 261 | if (w->obj_index_tree != NULL) { |
513 | 15 | infix_walk(w->obj_index_tree, &object_record_free, NULL); |
514 | 15 | tree_free(w->obj_index_tree); |
515 | 15 | w->obj_index_tree = NULL; |
516 | 15 | } |
517 | 261 | |
518 | 261 | w->block_writer = NULL; |
519 | 261 | return 0; |
520 | 261 | } |
521 | | |
/* Finish the table: close the open section and write the footer (a
 * repeat of the header followed by section offsets and a CRC). Frees
 * the writer's scratch state on all paths. Returns
 * REFTABLE_EMPTY_TABLE_ERROR if nothing was ever added. */
int reftable_writer_close(struct reftable_writer *w)
{
	byte footer[72];
	byte *p = footer;
	int err = writer_finish_public_section(w);
	/* w->next == 0 means no block was ever flushed. */
	int empty_table = w->next == 0;
	if (err != 0) {
		goto exit;
	}
	w->pending_padding = 0;
	if (empty_table) {
		/* Empty tables need a header anyway. */
		byte header[28];
		int n = writer_write_header(w, header);
		err = padded_write(w, header, n, 0);
		if (err < 0) {
			goto exit;
		}
	}

	/* Footer layout: header copy, then five 8-byte offsets, then CRC.
	   The obj word packs the section offset with the prefix length. */
	p += writer_write_header(w, footer);
	put_be64(p, w->stats.ref_stats.index_offset);
	p += 8;
	put_be64(p, (w->stats.obj_stats.offset) << 5 | w->stats.object_id_len);
	p += 8;
	put_be64(p, w->stats.obj_stats.index_offset);
	p += 8;

	put_be64(p, w->stats.log_stats.offset);
	p += 8;
	put_be64(p, w->stats.log_stats.index_offset);
	p += 8;

	/* CRC covers everything written into the footer so far. */
	put_be32(p, crc32(0, footer, p - footer));
	p += 4;

	err = padded_write(w, footer, footer_size(writer_version(w)), 0);
	if (err < 0) {
		goto exit;
	}

	if (empty_table) {
		err = REFTABLE_EMPTY_TABLE_ERROR;
		goto exit;
	}

exit:
	/* free up memory. */
	block_writer_clear(&w->block_writer_data);
	writer_clear_index(w);
	slice_clear(&w->last_key);
	return err;
}
575 | | |
576 | | void writer_clear_index(struct reftable_writer *w) |
577 | 485 | { |
578 | 485 | int i = 0; |
579 | 732 | for (i = 0; i < w->index_len; i++) { |
580 | 247 | slice_clear(&w->index[i].last_key); |
581 | 247 | } |
582 | 485 | |
583 | 485 | FREE_AND_NULL(w->index); |
584 | 485 | w->index_len = 0; |
585 | 485 | w->index_cap = 0; |
586 | 485 | } |
587 | | |
/* Set to 1 to dump per-block diagnostics to stderr on each flush. */
const int debug = 0;
589 | | |
/* Serialize the current (non-empty) block, write it out, and append an
 * index record pointing at it. Updates per-type and global stats, and
 * clears w->block_writer so a new block must be started. */
static int writer_flush_nonempty_block(struct reftable_writer *w)
{
	byte typ = block_writer_type(w->block_writer);
	struct reftable_block_stats *bstats =
		writer_reftable_block_stats(w, typ);
	/* Capture the section start before w->next advances below; only
	   meaningful for the first block of this type. */
	uint64_t block_typ_off = (bstats->blocks == 0) ? w->next : 0;
	int raw_bytes = block_writer_finish(w->block_writer);
	int padding = 0;
	int err = 0;
	if (raw_bytes < 0) {
		return raw_bytes;
	}

	/* Log blocks are packed; other types pad to the block size
	   unless the writer was configured unpadded. */
	if (!w->opts.unpadded && typ != BLOCK_TYPE_LOG) {
		padding = w->opts.block_size - raw_bytes;
	}

	if (block_typ_off > 0) {
		bstats->offset = block_typ_off;
	}

	bstats->entries += w->block_writer->entries;
	bstats->restarts += w->block_writer->restart_len;
	bstats->blocks++;
	w->stats.blocks++;

	if (debug) {
		fprintf(stderr, "block %c off %" PRIu64 " sz %d (%d)\n", typ,
			w->next, raw_bytes,
			get_be24(w->block + w->block_writer->header_off + 1));
	}

	/* The first block of the file carries the header in-line. */
	if (w->next == 0) {
		writer_write_header(w, w->block);
	}

	err = padded_write(w, w->block, raw_bytes, padding);
	if (err < 0) {
		return err;
	}

	if (w->index_cap == w->index_len) {
		w->index_cap = 2 * w->index_cap + 1;
		w->index = reftable_realloc(
			w->index, sizeof(struct index_record) * w->index_cap);
	}

	/* Index entry: this block's offset plus a copy of its last key. */
	{
		struct index_record ir = {
			.offset = w->next,
		};
		slice_copy(&ir.last_key, w->block_writer->last_key);
		w->index[w->index_len] = ir;
	}

	w->index_len++;
	w->next += padding + raw_bytes;
	w->block_writer = NULL;
	return 0;
}
650 | | |
651 | | int writer_flush_block(struct reftable_writer *w) |
652 | 702 | { |
653 | 702 | if (w->block_writer == NULL) { |
654 | 217 | return 0; |
655 | 217 | } |
656 | 485 | if (w->block_writer->entries == 0) { |
657 | 78 | return 0; |
658 | 78 | } |
659 | 407 | return writer_flush_nonempty_block(w); |
660 | 407 | } |
661 | | |
662 | | const struct reftable_stats *writer_stats(struct reftable_writer *w) |
663 | 7 | { |
664 | 7 | return &w->stats; |
665 | 7 | } |