Mirror of https://github.com/git/git.git
Merge branch 'kn/pack-write-with-reduced-globals'
Code clean-up.

* kn/pack-write-with-reduced-globals:
  pack-write: pass hash_algo to internal functions
  pack-write: pass hash_algo to `write_rev_file()`
  pack-write: pass hash_algo to `write_idx_file()`
  pack-write: pass repository to `index_pack_lockfile()`
  pack-write: pass hash_algo to `fixup_pack_header_footer()`
commit b83a2f9006
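
Taken together, the topic converts pack-writing helpers that silently read the `the_hash_algo` / `the_repository` globals into functions that receive the hash algorithm (or the repository) from their caller. Below is a minimal, self-contained sketch of that pattern; the `struct hash_algo` here is a stand-in for illustration, not Git's real `struct git_hash_algo`.

/*
 * Sketch of the refactoring pattern: a writer that used to depend on a
 * global now takes the hash algorithm as an explicit parameter.
 */
#include <stdio.h>
#include <stddef.h>

struct hash_algo {
	const char *name;
	size_t rawsz;	/* size of a binary hash, e.g. 20 for SHA-1 */
};

static const struct hash_algo sha1_algo = { "sha1", 20 };
static const struct hash_algo sha256_algo = { "sha256", 32 };

/* Old style: the writer implicitly depends on a global. */
static const struct hash_algo *the_hash_algo = &sha1_algo;

static void write_trailer_old(FILE *f, const unsigned char *hash)
{
	fwrite(hash, 1, the_hash_algo->rawsz, f);
}

/* New style: the caller decides which algorithm is in effect. */
static void write_trailer_new(const struct hash_algo *algo, FILE *f,
			      const unsigned char *hash)
{
	fwrite(hash, 1, algo->rawsz, f);
}

int main(void)
{
	unsigned char hash[32] = { 0 };
	FILE *f = tmpfile();

	if (!f)
		return 1;
	write_trailer_old(f, hash);		  /* tied to the global */
	write_trailer_new(&sha256_algo, f, hash); /* explicit dependency */
	fclose(f);
	return 0;
}

The hunks below make the same move in the real code: each call site now spells out `the_hash_algo`, `ctx->repo->hash_algo`, or `the_repository`, and pack-write.c itself drops the `USE_THE_REPOSITORY_VARIABLE` definition.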
@@ -798,8 +798,8 @@ static const char *create_index(void)
 	if (c != last)
 		die("internal consistency error creating the index");
 
-	tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts,
-				 pack_data->hash);
+	tmpfile = write_idx_file(the_hash_algo, NULL, idx, object_count,
+				 &pack_idx_opts, pack_data->hash);
 	free(idx);
 	return tmpfile;
 }
@@ -878,9 +878,10 @@ static void end_packfile(void)
 
 		close_pack_windows(pack_data);
 		finalize_hashfile(pack_file, cur_pack_oid.hash, FSYNC_COMPONENT_PACK, 0);
-		fixup_pack_header_footer(pack_data->pack_fd, pack_data->hash,
-					 pack_data->pack_name, object_count,
-					 cur_pack_oid.hash, pack_size);
+		fixup_pack_header_footer(the_hash_algo, pack_data->pack_fd,
+					 pack_data->hash, pack_data->pack_name,
+					 object_count, cur_pack_oid.hash,
+					 pack_size);
 
 		if (object_count <= unpack_limit) {
 			if (!loosen_small_pack(pack_data)) {
@@ -1392,7 +1392,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
 		strbuf_release(&msg);
 		finalize_hashfile(f, tail_hash, FSYNC_COMPONENT_PACK, 0);
 		hashcpy(read_hash, pack_hash, the_repository->hash_algo);
-		fixup_pack_header_footer(output_fd, pack_hash,
+		fixup_pack_header_footer(the_hash_algo, output_fd, pack_hash,
					 curr_pack, nr_objects,
					 read_hash, consumed_bytes-the_hash_algo->rawsz);
		if (!hasheq(read_hash, tail_hash, the_repository->hash_algo))
@@ -2089,11 +2089,12 @@ int cmd_index_pack(int argc,
 	ALLOC_ARRAY(idx_objects, nr_objects);
 	for (i = 0; i < nr_objects; i++)
 		idx_objects[i] = &objects[i].idx;
-	curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_hash);
+	curr_index = write_idx_file(the_hash_algo, index_name, idx_objects,
+				    nr_objects, &opts, pack_hash);
 	if (rev_index)
-		curr_rev_index = write_rev_file(rev_index_name, idx_objects,
-						nr_objects, pack_hash,
-						opts.flags);
+		curr_rev_index = write_rev_file(the_hash_algo, rev_index_name,
+						idx_objects, nr_objects,
+						pack_hash, opts.flags);
 	free(idx_objects);
 
 	if (!verify)
@@ -1319,8 +1319,9 @@ static void write_pack_file(void)
 			 */
 
 			int fd = finalize_hashfile(f, hash, FSYNC_COMPONENT_PACK, 0);
-			fixup_pack_header_footer(fd, hash, pack_tmp_name,
-						 nr_written, hash, offset);
+			fixup_pack_header_footer(the_hash_algo, fd, hash,
+						 pack_tmp_name, nr_written,
+						 hash, offset);
 			close(fd);
 			if (write_bitmap_index) {
 				if (write_bitmap_index != WRITE_BITMAP_QUIET)
@@ -1368,9 +1369,10 @@ static void write_pack_file(void)
 			if (cruft)
 				pack_idx_opts.flags |= WRITE_MTIMES;
 
-			stage_tmp_packfiles(&tmpname, pack_tmp_name,
-					    written_list, nr_written,
-					    &to_pack, &pack_idx_opts, hash,
+			stage_tmp_packfiles(the_hash_algo, &tmpname,
+					    pack_tmp_name, written_list,
+					    nr_written, &to_pack,
+					    &pack_idx_opts, hash,
 					    &idx_tmp_name);
 
 			if (write_bitmap_index) {
@@ -2304,7 +2304,7 @@ static const char *unpack(int err_fd, struct shallow_info *si)
 		if (status)
 			return "index-pack fork failed";
 
-		lockfile = index_pack_lockfile(child.out, NULL);
+		lockfile = index_pack_lockfile(the_repository, child.out, NULL);
 		if (lockfile) {
 			pack_lockfile = register_tempfile(lockfile);
 			free(lockfile);
@@ -44,8 +44,9 @@ static void finish_tmp_packfile(struct strbuf *basename,
 {
 	char *idx_tmp_name = NULL;
 
-	stage_tmp_packfiles(basename, pack_tmp_name, written_list, nr_written,
-			    NULL, pack_idx_opts, hash, &idx_tmp_name);
+	stage_tmp_packfiles(the_hash_algo, basename, pack_tmp_name,
+			    written_list, nr_written, NULL, pack_idx_opts, hash,
+			    &idx_tmp_name);
 	rename_tmp_packfile_idx(basename, &idx_tmp_name);
 
 	free(idx_tmp_name);
@@ -70,7 +71,7 @@ static void flush_bulk_checkin_packfile(struct bulk_checkin_packfile *state)
 				  CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
 	} else {
 		int fd = finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, 0);
-		fixup_pack_header_footer(fd, hash, state->pack_tmp_name,
+		fixup_pack_header_footer(the_hash_algo, fd, hash, state->pack_tmp_name,
					 state->nr_written, hash,
					 state->offset);
		close(fd);
@@ -1036,7 +1036,9 @@ static int get_pack(struct fetch_pack_args *args,
 		die(_("fetch-pack: unable to fork off %s"), cmd_name);
 	if (do_keep && (pack_lockfiles || fsck_objects)) {
 		int is_well_formed;
-		char *pack_lockfile = index_pack_lockfile(cmd.out, &is_well_formed);
+		char *pack_lockfile = index_pack_lockfile(the_repository,
+							  cmd.out,
+							  &is_well_formed);
 
 		if (!is_well_formed)
 			die(_("fetch-pack: invalid index-pack output"));
@@ -658,8 +658,8 @@ static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash,
 	strbuf_addf(&buf, "%s-%s.rev", midx_name, hash_to_hex_algop(midx_hash,
								    ctx->repo->hash_algo));
 
-	tmp_file = write_rev_file_order(NULL, ctx->pack_order, ctx->entries_nr,
-					midx_hash, WRITE_REV);
+	tmp_file = write_rev_file_order(ctx->repo->hash_algo, NULL, ctx->pack_order,
+					ctx->entries_nr, midx_hash, WRITE_REV);
 
 	if (finalize_object_file(tmp_file, buf.buf))
 		die(_("cannot store reverse index file"));
pack-write.c (99 changed lines)
@@ -1,5 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
-
 #include "git-compat-util.h"
 #include "environment.h"
 #include "gettext.h"
@@ -56,7 +54,8 @@ static int need_large_offset(off_t offset, const struct pack_idx_option *opts)
  * The *sha1 contains the pack content SHA1 hash.
  * The objects array passed in will be sorted by SHA1 on exit.
  */
-const char *write_idx_file(const char *index_name, struct pack_idx_entry **objects,
+const char *write_idx_file(const struct git_hash_algo *hash_algo,
+			   const char *index_name, struct pack_idx_entry **objects,
 			   int nr_objects, const struct pack_idx_option *opts,
 			   const unsigned char *sha1)
 {
@@ -130,7 +129,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
 		struct pack_idx_entry *obj = *list++;
 		if (index_version < 2)
 			hashwrite_be32(f, obj->offset);
-		hashwrite(f, obj->oid.hash, the_hash_algo->rawsz);
+		hashwrite(f, obj->oid.hash, hash_algo->rawsz);
 		if ((opts->flags & WRITE_IDX_STRICT) &&
 		    (i && oideq(&list[-2]->oid, &obj->oid)))
 			die("The same object %s appears twice in the pack",
@@ -172,7 +171,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
 		}
 	}
 
-	hashwrite(f, sha1, the_hash_algo->rawsz);
+	hashwrite(f, sha1, hash_algo->rawsz);
 	finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA,
			  CSUM_HASH_IN_STREAM | CSUM_CLOSE |
			  ((opts->flags & WRITE_IDX_VERIFY) ? 0 : CSUM_FSYNC));
@@ -193,11 +192,12 @@ static int pack_order_cmp(const void *va, const void *vb, void *ctx)
 	return 0;
 }
 
-static void write_rev_header(struct hashfile *f)
+static void write_rev_header(const struct git_hash_algo *hash_algo,
+			     struct hashfile *f)
 {
 	hashwrite_be32(f, RIDX_SIGNATURE);
 	hashwrite_be32(f, RIDX_VERSION);
-	hashwrite_be32(f, oid_version(the_hash_algo));
+	hashwrite_be32(f, oid_version(hash_algo));
 }
 
 static void write_rev_index_positions(struct hashfile *f,
@@ -209,12 +209,14 @@ static void write_rev_index_positions(struct hashfile *f,
 		hashwrite_be32(f, pack_order[i]);
 }
 
-static void write_rev_trailer(struct hashfile *f, const unsigned char *hash)
+static void write_rev_trailer(const struct git_hash_algo *hash_algo,
+			      struct hashfile *f, const unsigned char *hash)
 {
-	hashwrite(f, hash, the_hash_algo->rawsz);
+	hashwrite(f, hash, hash_algo->rawsz);
 }
 
-char *write_rev_file(const char *rev_name,
+char *write_rev_file(const struct git_hash_algo *hash_algo,
+		     const char *rev_name,
 		     struct pack_idx_entry **objects,
 		     uint32_t nr_objects,
 		     const unsigned char *hash,
@@ -232,15 +234,16 @@ char *write_rev_file(const char *rev_name,
 		pack_order[i] = i;
 	QSORT_S(pack_order, nr_objects, pack_order_cmp, objects);
 
-	ret = write_rev_file_order(rev_name, pack_order, nr_objects, hash,
-				   flags);
+	ret = write_rev_file_order(hash_algo, rev_name, pack_order, nr_objects,
+				   hash, flags);
 
 	free(pack_order);
 
 	return ret;
 }
 
-char *write_rev_file_order(const char *rev_name,
+char *write_rev_file_order(const struct git_hash_algo *hash_algo,
+			   const char *rev_name,
 			   uint32_t *pack_order,
 			   uint32_t nr_objects,
 			   const unsigned char *hash,
@@ -279,10 +282,10 @@ char *write_rev_file_order(const char *rev_name,
 		return NULL;
 	}
 
-	write_rev_header(f);
+	write_rev_header(hash_algo, f);
 
 	write_rev_index_positions(f, pack_order, nr_objects);
-	write_rev_trailer(f, hash);
+	write_rev_trailer(hash_algo, f, hash);
 
 	if (adjust_shared_perm(path) < 0)
 		die(_("failed to make %s readable"), path);
@@ -294,11 +297,12 @@ char *write_rev_file_order(const char *rev_name,
 	return path;
 }
 
-static void write_mtimes_header(struct hashfile *f)
+static void write_mtimes_header(const struct git_hash_algo *hash_algo,
+				struct hashfile *f)
 {
 	hashwrite_be32(f, MTIMES_SIGNATURE);
 	hashwrite_be32(f, MTIMES_VERSION);
-	hashwrite_be32(f, oid_version(the_hash_algo));
+	hashwrite_be32(f, oid_version(hash_algo));
 }
 
 /*
@@ -318,12 +322,14 @@ static void write_mtimes_objects(struct hashfile *f,
 	}
 }
 
-static void write_mtimes_trailer(struct hashfile *f, const unsigned char *hash)
+static void write_mtimes_trailer(const struct git_hash_algo *hash_algo,
+				 struct hashfile *f, const unsigned char *hash)
 {
-	hashwrite(f, hash, the_hash_algo->rawsz);
+	hashwrite(f, hash, hash_algo->rawsz);
 }
 
-static char *write_mtimes_file(struct packing_data *to_pack,
+static char *write_mtimes_file(const struct git_hash_algo *hash_algo,
+			       struct packing_data *to_pack,
			       struct pack_idx_entry **objects,
			       uint32_t nr_objects,
			       const unsigned char *hash)
@@ -340,9 +346,9 @@ static char *write_mtimes_file(struct packing_data *to_pack,
 	mtimes_name = strbuf_detach(&tmp_file, NULL);
 	f = hashfd(fd, mtimes_name);
 
-	write_mtimes_header(f);
+	write_mtimes_header(hash_algo, f);
 	write_mtimes_objects(f, to_pack, objects, nr_objects);
-	write_mtimes_trailer(f, hash);
+	write_mtimes_trailer(hash_algo, f, hash);
 
 	if (adjust_shared_perm(mtimes_name) < 0)
 		die(_("failed to make %s readable"), mtimes_name);
@@ -380,7 +386,8 @@ off_t write_pack_header(struct hashfile *f, uint32_t nr_entries)
  * partial_pack_sha1 can refer to the same buffer if the caller is not
  * interested in the resulting SHA1 of pack data above partial_pack_offset.
  */
-void fixup_pack_header_footer(int pack_fd,
+void fixup_pack_header_footer(const struct git_hash_algo *hash_algo,
+			      int pack_fd,
			      unsigned char *new_pack_hash,
			      const char *pack_name,
			      uint32_t object_count,
@@ -393,8 +400,8 @@ void fixup_pack_header_footer(int pack_fd,
 	char *buf;
 	ssize_t read_result;
 
-	the_hash_algo->init_fn(&old_hash_ctx);
-	the_hash_algo->init_fn(&new_hash_ctx);
+	hash_algo->init_fn(&old_hash_ctx);
+	hash_algo->init_fn(&new_hash_ctx);
 
 	if (lseek(pack_fd, 0, SEEK_SET) != 0)
 		die_errno("Failed seeking to start of '%s'", pack_name);
@@ -406,9 +413,9 @@ void fixup_pack_header_footer(int pack_fd,
 			  pack_name);
 	if (lseek(pack_fd, 0, SEEK_SET) != 0)
 		die_errno("Failed seeking to start of '%s'", pack_name);
-	the_hash_algo->update_fn(&old_hash_ctx, &hdr, sizeof(hdr));
+	hash_algo->update_fn(&old_hash_ctx, &hdr, sizeof(hdr));
 	hdr.hdr_entries = htonl(object_count);
-	the_hash_algo->update_fn(&new_hash_ctx, &hdr, sizeof(hdr));
+	hash_algo->update_fn(&new_hash_ctx, &hdr, sizeof(hdr));
 	write_or_die(pack_fd, &hdr, sizeof(hdr));
 	partial_pack_offset -= sizeof(hdr);
 
@@ -423,7 +430,7 @@ void fixup_pack_header_footer(int pack_fd,
 			break;
 		if (n < 0)
 			die_errno("Failed to checksum '%s'", pack_name);
-		the_hash_algo->update_fn(&new_hash_ctx, buf, n);
+		hash_algo->update_fn(&new_hash_ctx, buf, n);
 
 		aligned_sz -= n;
 		if (!aligned_sz)
@@ -432,13 +439,12 @@ void fixup_pack_header_footer(int pack_fd,
 		if (!partial_pack_hash)
 			continue;
 
-		the_hash_algo->update_fn(&old_hash_ctx, buf, n);
+		hash_algo->update_fn(&old_hash_ctx, buf, n);
 		partial_pack_offset -= n;
 		if (partial_pack_offset == 0) {
 			unsigned char hash[GIT_MAX_RAWSZ];
-			the_hash_algo->final_fn(hash, &old_hash_ctx);
-			if (!hasheq(hash, partial_pack_hash,
-				    the_repository->hash_algo))
+			hash_algo->final_fn(hash, &old_hash_ctx);
+			if (!hasheq(hash, partial_pack_hash, hash_algo))
 				die("Unexpected checksum for %s "
 				    "(disk corruption?)", pack_name);
 			/*
@@ -446,7 +452,7 @@ void fixup_pack_header_footer(int pack_fd,
 			 * pack, which also means making partial_pack_offset
 			 * big enough not to matter anymore.
 			 */
-			the_hash_algo->init_fn(&old_hash_ctx);
+			hash_algo->init_fn(&old_hash_ctx);
 			partial_pack_offset = ~partial_pack_offset;
 			partial_pack_offset -= MSB(partial_pack_offset, 1);
 		}
@@ -454,16 +460,16 @@ void fixup_pack_header_footer(int pack_fd,
 	free(buf);
 
 	if (partial_pack_hash)
-		the_hash_algo->final_fn(partial_pack_hash, &old_hash_ctx);
-	the_hash_algo->final_fn(new_pack_hash, &new_hash_ctx);
-	write_or_die(pack_fd, new_pack_hash, the_hash_algo->rawsz);
+		hash_algo->final_fn(partial_pack_hash, &old_hash_ctx);
+	hash_algo->final_fn(new_pack_hash, &new_hash_ctx);
+	write_or_die(pack_fd, new_pack_hash, hash_algo->rawsz);
 	fsync_component_or_die(FSYNC_COMPONENT_PACK, pack_fd, pack_name);
 }
 
-char *index_pack_lockfile(int ip_out, int *is_well_formed)
+char *index_pack_lockfile(struct repository *r, int ip_out, int *is_well_formed)
 {
 	char packname[GIT_MAX_HEXSZ + 6];
-	const int len = the_hash_algo->hexsz + 6;
+	const int len = r->hash_algo->hexsz + 6;
 
 	/*
 	 * The first thing we expect from index-pack's output
@@ -480,7 +486,7 @@ char *index_pack_lockfile(int ip_out, int *is_well_formed)
 		packname[len-1] = 0;
 		if (skip_prefix(packname, "keep\t", &name))
 			return xstrfmt("%s/pack/pack-%s.keep",
-				       repo_get_object_directory(the_repository), name);
+				       repo_get_object_directory(r), name);
 		return NULL;
 	}
 	if (is_well_formed)
@@ -546,7 +552,8 @@ void rename_tmp_packfile_idx(struct strbuf *name_buffer,
 	rename_tmp_packfile(name_buffer, *idx_tmp_name, "idx");
 }
 
-void stage_tmp_packfiles(struct strbuf *name_buffer,
+void stage_tmp_packfiles(const struct git_hash_algo *hash_algo,
+			 struct strbuf *name_buffer,
			 const char *pack_tmp_name,
			 struct pack_idx_entry **written_list,
			 uint32_t nr_written,
@@ -561,17 +568,17 @@ void stage_tmp_packfiles(struct strbuf *name_buffer,
 	if (adjust_shared_perm(pack_tmp_name))
 		die_errno("unable to make temporary pack file readable");
 
-	*idx_tmp_name = (char *)write_idx_file(NULL, written_list, nr_written,
-					       pack_idx_opts, hash);
+	*idx_tmp_name = (char *)write_idx_file(hash_algo, NULL, written_list,
+					       nr_written, pack_idx_opts, hash);
 	if (adjust_shared_perm(*idx_tmp_name))
 		die_errno("unable to make temporary index file readable");
 
-	rev_tmp_name = write_rev_file(NULL, written_list, nr_written, hash,
-				      pack_idx_opts->flags);
+	rev_tmp_name = write_rev_file(hash_algo, NULL, written_list, nr_written,
+				      hash, pack_idx_opts->flags);
 
 	if (pack_idx_opts->flags & WRITE_MTIMES) {
-		mtimes_tmp_name = write_mtimes_file(to_pack, written_list,
-						    nr_written,
+		mtimes_tmp_name = write_mtimes_file(hash_algo, to_pack,
+						    written_list, nr_written,
						    hash);
	}
 
pack.h (30 changed lines)
@@ -87,20 +87,37 @@ struct progress;
 /* Note, the data argument could be NULL if object type is blob */
 typedef int (*verify_fn)(const struct object_id *, enum object_type, unsigned long, void*, int*);
 
-const char *write_idx_file(const char *index_name, struct pack_idx_entry **objects, int nr_objects, const struct pack_idx_option *, const unsigned char *sha1);
+const char *write_idx_file(const struct git_hash_algo *hash_algo,
+			   const char *index_name,
+			   struct pack_idx_entry **objects,
+			   int nr_objects,
+			   const struct pack_idx_option *,
+			   const unsigned char *sha1);
 int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, off_t offset, off_t len, unsigned int nr);
 int verify_pack_index(struct packed_git *);
 int verify_pack(struct repository *, struct packed_git *, verify_fn fn, struct progress *, uint32_t);
 off_t write_pack_header(struct hashfile *f, uint32_t);
-void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t, unsigned char *, off_t);
-char *index_pack_lockfile(int fd, int *is_well_formed);
+void fixup_pack_header_footer(const struct git_hash_algo *, int,
+			      unsigned char *, const char *, uint32_t,
+			      unsigned char *, off_t);
+char *index_pack_lockfile(struct repository *r, int fd, int *is_well_formed);
 
 struct ref;
 
 void write_promisor_file(const char *promisor_name, struct ref **sought, int nr_sought);
 
-char *write_rev_file(const char *rev_name, struct pack_idx_entry **objects, uint32_t nr_objects, const unsigned char *hash, unsigned flags);
-char *write_rev_file_order(const char *rev_name, uint32_t *pack_order, uint32_t nr_objects, const unsigned char *hash, unsigned flags);
+char *write_rev_file(const struct git_hash_algo *hash_algo,
+		     const char *rev_name,
+		     struct pack_idx_entry **objects,
+		     uint32_t nr_objects,
+		     const unsigned char *hash,
+		     unsigned flags);
+char *write_rev_file_order(const struct git_hash_algo *hash_algo,
+			   const char *rev_name,
+			   uint32_t *pack_order,
+			   uint32_t nr_objects,
+			   const unsigned char *hash,
+			   unsigned flags);
 
 /*
  * The "hdr" output buffer should be at least this big, which will handle sizes
@@ -118,7 +135,8 @@ int read_pack_header(int fd, struct pack_header *);
 struct packing_data;
 
 struct hashfile *create_tmp_packfile(char **pack_tmp_name);
-void stage_tmp_packfiles(struct strbuf *name_buffer,
+void stage_tmp_packfiles(const struct git_hash_algo *hash_algo,
+			 struct strbuf *name_buffer,
			 const char *pack_tmp_name,
			 struct pack_idx_entry **written_list,
			 uint32_t nr_written,
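
For orientation, here is a hedged caller-side sketch of the updated entry points declared above. It assumes Git's internal headers and types (`pack.h`, `repository.h`) and an already-populated object list, and the helper name `write_pack_metadata` is hypothetical, so this illustrates the new calling convention rather than being a standalone program.

#include "git-compat-util.h"
#include "pack.h"
#include "repository.h"

/*
 * Hypothetical helper (not part of the series): write the .idx and .rev
 * files for an in-memory object list, passing the repository's hash
 * algorithm explicitly instead of relying on the_hash_algo global.
 */
static void write_pack_metadata(struct repository *repo,
				struct pack_idx_entry **objects,
				uint32_t nr_objects,
				const struct pack_idx_option *opts,
				const unsigned char *pack_hash)
{
	const char *idx_name;
	char *rev_name;

	idx_name = write_idx_file(repo->hash_algo, NULL, objects,
				  (int)nr_objects, opts, pack_hash);
	rev_name = write_rev_file(repo->hash_algo, NULL, objects,
				  nr_objects, pack_hash, opts->flags);

	/* The real callers rename these temporary files into place,
	 * as stage_tmp_packfiles() does above; the results are unused
	 * in this sketch. */
	(void)idx_name;
	(void)rev_name;
}

The design point is the same one the commit message states: the writers no longer decide which hash algorithm or repository is in effect; the caller does, which removes another set of implicit dependencies on the `the_repository` global.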