#include "progress.h"
#include "fsck.h"
#include "exec_cmd.h"
+#include "thread-utils.h"
static const char index_pack_usage[] =
"git index-pack [-v] [-o <index-file>] [--keep | --keep=<msg>] [--verify] [--strict] (<pack-file> | --stdin [--fix-thin] [<pack-file>])";
int ofs_first, ofs_last;
};
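+/*
+ * Per-thread state: the worker's thread handle plus its own delta base
+ * cache, so workers never touch each other's cached bases.
+ */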
+struct thread_local {
+#ifndef NO_PTHREADS
+ pthread_t thread;
+#endif
+ struct base_data *base_cache;
+ size_t base_cache_used;
+};
+
/*
* Even if sizeof(union delta_base) == 24 on 64-bit archs, we really want
* to memcmp() only the first 20 bytes.
static struct object_entry *objects;
static struct delta_entry *deltas;
-static struct base_data *base_cache;
-static size_t base_cache_used;
+static struct thread_local nothread_data;
static int nr_objects;
static int nr_deltas;
static int nr_resolved_deltas;
+static int nr_threads;
static int from_stdin;
static int strict;
static uint32_t input_crc32;
static int input_fd, output_fd, pack_fd;
+#ifndef NO_PTHREADS
+
+static struct thread_local *thread_data;
+static int nr_dispatched;
+static int threads_active;
+
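+/*
+ * read_mutex serializes access to the object database, counter_mutex
+ * guards nr_resolved_deltas, and work_mutex guards the dispatch counter
+ * used to hand out work to the threads.
+ */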
+static pthread_mutex_t read_mutex;
+#define read_lock() lock_mutex(&read_mutex)
+#define read_unlock() unlock_mutex(&read_mutex)
+
+static pthread_mutex_t counter_mutex;
+#define counter_lock() lock_mutex(&counter_mutex)
+#define counter_unlock() unlock_mutex(&counter_mutex)
+
+static pthread_mutex_t work_mutex;
+#define work_lock() lock_mutex(&work_mutex)
+#define work_unlock() unlock_mutex(&work_mutex)
+
+static pthread_key_t key;
+
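+/* These wrappers turn into no-ops while threads_active is zero. */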
+static inline void lock_mutex(pthread_mutex_t *mutex)
+{
+ if (threads_active)
+ pthread_mutex_lock(mutex);
+}
+
+static inline void unlock_mutex(pthread_mutex_t *mutex)
+{
+ if (threads_active)
+ pthread_mutex_unlock(mutex);
+}
+
+/*
+ * Mutexes and condition variables can't be statically initialized on Windows.
+ */
+static void init_thread(void)
+{
+ init_recursive_mutex(&read_mutex);
+ pthread_mutex_init(&counter_mutex, NULL);
+ pthread_mutex_init(&work_mutex, NULL);
+ pthread_key_create(&key, NULL);
+ thread_data = xcalloc(nr_threads, sizeof(*thread_data));
+ threads_active = 1;
+}
+
+static void cleanup_thread(void)
+{
+ if (!threads_active)
+ return;
+ threads_active = 0;
+ pthread_mutex_destroy(&read_mutex);
+ pthread_mutex_destroy(&counter_mutex);
+ pthread_mutex_destroy(&work_mutex);
+ pthread_key_delete(key);
+ free(thread_data);
+}
+
+#else
+
+#define read_lock()
+#define read_unlock()
+
+#define counter_lock()
+#define counter_unlock()
+
+#define work_lock()
+#define work_unlock()
+
+#endif
+
+
static int mark_link(struct object *obj, int type, void *data)
{
if (!obj)
die("pack has bad object at offset %lu: %s", offset, buf);
}
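+/*
+ * Return the calling thread's private data, falling back to the static
+ * nothread_data when no worker threads are running.
+ */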
+static inline struct thread_local *get_thread_data(void)
+{
+#ifndef NO_PTHREADS
+ if (threads_active)
+ return pthread_getspecific(key);
+ assert(!threads_active &&
+ "This should only be reached when all threads are gone");
+#endif
+ return &nothread_data;
+}
+
+#ifndef NO_PTHREADS
+static void set_thread_data(struct thread_local *data)
+{
+ if (threads_active)
+ pthread_setspecific(key, data);
+}
+#endif
+
static struct base_data *alloc_base_data(void)
{
struct base_data *base = xmalloc(sizeof(struct base_data));
if (c->data) {
free(c->data);
c->data = NULL;
- base_cache_used -= c->size;
+ get_thread_data()->base_cache_used -= c->size;
}
}
static void prune_base_data(struct base_data *retain)
{
struct base_data *b;
- for (b = base_cache;
- base_cache_used > delta_base_cache_limit && b;
+ struct thread_local *data = get_thread_data();
+ for (b = data->base_cache;
+ data->base_cache_used > delta_base_cache_limit && b;
b = b->child) {
if (b->data && b != retain)
free_base_data(b);
if (base)
base->child = c;
else
- base_cache = c;
+ get_thread_data()->base_cache = c;
c->base = base;
c->child = NULL;
if (c->data)
- base_cache_used += c->size;
+ get_thread_data()->base_cache_used += c->size;
prune_base_data(c);
}
if (base)
base->child = NULL;
else
- base_cache = NULL;
+ get_thread_data()->base_cache = NULL;
free_base_data(c);
}
enum object_type type, unsigned char *sha1)
{
hash_sha1_file(data, size, typename(type), sha1);
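+ /*
+ * The existence check and re-read below go through the object
+ * database, so keep them under read_mutex.
+ */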
+ read_lock();
if (has_sha1_file(sha1)) {
void *has_data;
enum object_type has_type;
unsigned long has_size;
has_data = read_sha1_file(sha1, &has_type, &has_size);
+ read_unlock();
if (!has_data)
die("cannot read existing object %s", sha1_to_hex(sha1));
if (size != has_size || type != has_type ||
memcmp(data, has_data, size) != 0)
die("SHA1 COLLISION FOUND WITH %s !", sha1_to_hex(sha1));
free(has_data);
- }
+ } else
+ read_unlock();
+
if (strict) {
+ read_lock();
if (type == OBJ_BLOB) {
struct blob *blob = lookup_blob(sha1);
if (blob)
}
obj->flags |= FLAG_CHECKED;
}
+ read_unlock();
}
}
if (!delta_nr) {
c->data = get_data_from_pack(obj);
c->size = obj->size;
- base_cache_used += c->size;
+ get_thread_data()->base_cache_used += c->size;
prune_base_data(c);
}
for (; delta_nr > 0; delta_nr--) {
free(raw);
if (!c->data)
bad_object(obj->idx.offset, "failed to apply delta");
- base_cache_used += c->size;
+ get_thread_data()->base_cache_used += c->size;
prune_base_data(c);
}
free(delta);
bad_object(delta_obj->idx.offset, "failed to apply delta");
sha1_object(result->data, result->size, delta_obj->real_type,
delta_obj->idx.sha1);
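+ /* nr_resolved_deltas is shared across workers; bump it under counter_mutex. */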
+ counter_lock();
nr_resolved_deltas++;
+ counter_unlock();
}
static struct base_data *find_unresolved_deltas_1(struct base_data *base,
find_unresolved_deltas(base_obj);
}
+#ifndef NO_PTHREADS
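+/*
+ * Worker loop: under work_mutex, claim the next non-delta object and
+ * resolve the deltas based on it; quit once every object has been
+ * dispatched.
+ */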
+static void *threaded_second_pass(void *data)
+{
+ set_thread_data(data);
+ for (;;) {
+ int i;
+ work_lock();
+ display_progress(progress, nr_resolved_deltas);
+ while (nr_dispatched < nr_objects &&
+ is_delta_type(objects[nr_dispatched].type))
+ nr_dispatched++;
+ if (nr_dispatched >= nr_objects) {
+ work_unlock();
+ break;
+ }
+ i = nr_dispatched++;
+ work_unlock();
+
+ resolve_base(&objects[i]);
+ }
+ return NULL;
+}
+#endif
+
/*
* First pass:
* - find locations of all objects;
if (verbose)
progress = start_progress("Resolving deltas", nr_deltas);
+
+#ifndef NO_PTHREADS
+ nr_dispatched = 0;
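+ /*
+ * With more than one thread requested (or GIT_FORCE_THREADS set),
+ * hand the work to the worker threads; otherwise fall through to the
+ * single-threaded loop below.
+ */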
+ if (nr_threads > 1 || getenv("GIT_FORCE_THREADS")) {
+ init_thread();
+ for (i = 0; i < nr_threads; i++) {
+ int ret = pthread_create(&thread_data[i].thread, NULL,
+ threaded_second_pass, thread_data + i);
+ if (ret)
+ die("unable to create thread: %s", strerror(ret));
+ }
+ for (i = 0; i < nr_threads; i++)
+ pthread_join(thread_data[i].thread, NULL);
+ cleanup_thread();
+ return;
+ }
+#endif
+
for (i = 0; i < nr_objects; i++) {
struct object_entry *obj = &objects[i];
die("bad pack.indexversion=%"PRIu32, opts->version);
return 0;
}
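+ /* pack.threads mirrors --threads; the default of 0 means auto-detect. */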
+ if (!strcmp(k, "pack.threads")) {
+ nr_threads = git_config_int(k, v);
+ if (nr_threads < 0)
+ die("invalid number of threads specified (%d)",
+ nr_threads);
+#ifdef NO_PTHREADS
+ if (nr_threads != 1)
+ warning("no threads support, ignoring %s", k);
+ nr_threads = 1;
+#endif
+ return 0;
+ }
return git_default_config(k, v, cb);
}
keep_msg = "";
} else if (!prefixcmp(arg, "--keep=")) {
keep_msg = arg + 7;
+ } else if (!prefixcmp(arg, "--threads=")) {
+ char *end;
+ nr_threads = strtoul(arg+10, &end, 0);
+ if (!arg[10] || *end || nr_threads < 0)
+ usage(index_pack_usage);
+#ifdef NO_PTHREADS
+ if (nr_threads != 1)
+ warning("no threads support, "
+ "ignoring %s", arg);
+ nr_threads = 1;
+#endif
} else if (!prefixcmp(arg, "--pack_header=")) {
struct pack_header *hdr;
char *c;
if (strict)
opts.flags |= WRITE_IDX_STRICT;
+#ifndef NO_PTHREADS
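+ /* No explicit thread count given: size the pool from the number of online CPUs. */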
+ if (!nr_threads) {
+ nr_threads = online_cpus();
+ /* An experiment showed that going beyond three threads does not make this any faster */
+ if (nr_threads > 3)
+ nr_threads = 3;
+ }
+#endif
+
curr_pack = open_pack_file(pack_name);
parse_pack_header();
objects = xcalloc(nr_objects + 1, sizeof(struct object_entry));