#include "cache.h"
#include "commit.h"
#include "pack.h"
-#include "fetch.h"
+#include "walker.h"
#include "http.h"
#define PREV_BUF_SIZE 4096
#define RANGE_HEADER_SIZE 30
-static int commits_on_stdin;
-
-static int got_alternates = -1;
-static int corrupt_object_found;
-
-static struct curl_slist *no_pragma_header;
-
struct alt_base
{
char *base;
struct alt_base *next;
};
-static struct alt_base *alt;
-
enum object_request_state {
WAITING,
ABORTED,
struct object_request
{
+ struct walker *walker;
unsigned char sha1[20];
struct alt_base *repo;
char *url;
};
struct alternates_request {
+ struct walker *walker;
const char *base;
char *url;
struct buffer *buffer;
int http_specific;
};
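+/*
+ * Per-walker private state, reached through walker->data; it replaces
+ * the former file-scope globals alt, got_alternates and no_pragma_header.
+ */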
+struct walker_data {
+ const char *url;
+ int got_alternates;
+ struct alt_base *alt;
+ struct curl_slist *no_pragma_header;
+};
+
static struct object_request *object_queue_head;
static size_t fwrite_sha1_file(void *ptr, size_t eltsize, size_t nmemb,
#define missing_target(a) missing__target((a)->http_code, (a)->curl_result)
-static void fetch_alternates(const char *base);
+static void fetch_alternates(struct walker *walker, const char *base);
static void process_object_response(void *callback_data);
-static void start_object_request(struct object_request *obj_req)
+static void start_object_request(struct walker *walker,
+ struct object_request *obj_req)
{
char *hex = sha1_to_hex(obj_req->sha1);
char prevfile[PATH_MAX];
char range[RANGE_HEADER_SIZE];
struct curl_slist *range_header = NULL;
struct active_request_slot *slot;
+ struct walker_data *data = walker->data;
snprintf(prevfile, sizeof(prevfile), "%s.prev", obj_req->filename);
unlink(prevfile);
curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_sha1_file);
curl_easy_setopt(slot->curl, CURLOPT_ERRORBUFFER, obj_req->errorstr);
curl_easy_setopt(slot->curl, CURLOPT_URL, url);
- curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header);
+ curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header);
/* If we have successfully processed data from a previous fetch
attempt, only fetch the data we don't already have. */
if (prev_posn>0) {
- if (get_verbosely)
+ if (walker->get_verbosely)
fprintf(stderr,
"Resuming fetch of object %s at byte %ld\n",
hex, prev_posn);
move_temp_to_file(obj_req->tmpfile, obj_req->filename);
if (obj_req->rename == 0)
- pull_say("got %s\n", sha1_to_hex(obj_req->sha1));
+ walker_say(obj_req->walker, "got %s\n", sha1_to_hex(obj_req->sha1));
}
static void process_object_response(void *callback_data)
{
struct object_request *obj_req =
(struct object_request *)callback_data;
+ struct walker *walker = obj_req->walker;
+ struct walker_data *data = walker->data;
+ struct alt_base *alt = data->alt;
obj_req->curl_result = obj_req->slot->curl_result;
obj_req->http_code = obj_req->slot->http_code;
/* Use alternates if necessary */
if (missing_target(obj_req)) {
- fetch_alternates(alt->base);
+ fetch_alternates(walker, alt->base);
if (obj_req->repo->next != NULL) {
obj_req->repo =
obj_req->repo->next;
close(obj_req->local);
obj_req->local = -1;
- start_object_request(obj_req);
+ start_object_request(walker, obj_req);
return;
}
}
}
#ifdef USE_CURL_MULTI
-static int fill_active_slot(void *unused)
+static int fill_active_slot(struct walker *walker)
{
struct object_request *obj_req;
if (has_sha1_file(obj_req->sha1))
obj_req->state = COMPLETE;
else {
- start_object_request(obj_req);
+ start_object_request(walker, obj_req);
return 1;
}
}
}
#endif
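+/* Walker "prefetch" hook: queue a request for this object without waiting for it */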
-void prefetch(unsigned char *sha1)
+static void prefetch(struct walker *walker, unsigned char *sha1)
{
struct object_request *newreq;
struct object_request *tail;
+ struct walker_data *data = walker->data;
char *filename = sha1_file_name(sha1);
newreq = xmalloc(sizeof(*newreq));
+ newreq->walker = walker;
hashcpy(newreq->sha1, sha1);
- newreq->repo = alt;
+ newreq->repo = data->alt;
newreq->url = NULL;
newreq->local = -1;
newreq->state = WAITING;
#endif
}
-static int fetch_index(struct alt_base *repo, unsigned char *sha1)
+static int fetch_index(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
{
char *hex = sha1_to_hex(sha1);
char *filename;
long prev_posn = 0;
char range[RANGE_HEADER_SIZE];
struct curl_slist *range_header = NULL;
+ struct walker_data *data = walker->data;
FILE *indexfile;
struct active_request_slot *slot;
if (has_pack_index(sha1))
return 0;
- if (get_verbosely)
+ if (walker->get_verbosely)
fprintf(stderr, "Getting index for pack %s\n", hex);
url = xmalloc(strlen(repo->base) + 64);
curl_easy_setopt(slot->curl, CURLOPT_FILE, indexfile);
curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite);
curl_easy_setopt(slot->curl, CURLOPT_URL, url);
- curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header);
+ curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header);
slot->local = indexfile;
/* If there is data present from a previous transfer attempt,
resume where it left off */
prev_posn = ftell(indexfile);
if (prev_posn>0) {
- if (get_verbosely)
+ if (walker->get_verbosely)
fprintf(stderr,
"Resuming fetch of index for pack %s at byte %ld\n",
hex, prev_posn);
return move_temp_to_file(tmpfile, filename);
}
-static int setup_index(struct alt_base *repo, unsigned char *sha1)
+static int setup_index(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
{
struct packed_git *new_pack;
if (has_pack_file(sha1))
return 0; /* don't list this as something we can get */
- if (fetch_index(repo, sha1))
+ if (fetch_index(walker, repo, sha1))
return -1;
new_pack = parse_pack_index(sha1);
{
struct alternates_request *alt_req =
(struct alternates_request *)callback_data;
+ struct walker *walker = alt_req->walker;
+ struct walker_data *cdata = walker->data;
struct active_request_slot *slot = alt_req->slot;
- struct alt_base *tail = alt;
+ struct alt_base *tail = cdata->alt;
const char *base = alt_req->base;
static const char null_byte = '\0';
char *data;
if (slot->finished != NULL)
(*slot->finished) = 0;
if (!start_active_slot(slot)) {
- got_alternates = -1;
+ cdata->got_alternates = -1;
slot->in_use = 0;
if (slot->finished != NULL)
(*slot->finished) = 1;
}
} else if (slot->curl_result != CURLE_OK) {
if (!missing_target(slot)) {
- got_alternates = -1;
+ cdata->got_alternates = -1;
return;
}
}
memcpy(target + serverlen, data + i,
posn - i - 7);
target[serverlen + posn - i - 7] = 0;
- if (get_verbosely)
+ if (walker->get_verbosely)
fprintf(stderr,
"Also look at %s\n", target);
newalt = xmalloc(sizeof(*newalt));
i = posn + 1;
}
- got_alternates = 1;
+ cdata->got_alternates = 1;
}
-static void fetch_alternates(const char *base)
+static void fetch_alternates(struct walker *walker, const char *base)
{
struct buffer buffer;
char *url;
char *data;
struct active_request_slot *slot;
struct alternates_request alt_req;
+ struct walker_data *cdata = walker->data;
/* If another request has already started fetching alternates,
wait for them to arrive and return to processing this request's
curl message */
#ifdef USE_CURL_MULTI
- while (got_alternates == 0) {
+ while (cdata->got_alternates == 0) {
step_active_slots();
}
#endif
/* Nothing to do if they've already been fetched */
- if (got_alternates == 1)
+ if (cdata->got_alternates == 1)
return;
/* Start the fetch */
- got_alternates = 0;
+ cdata->got_alternates = 0;
data = xmalloc(4096);
buffer.size = 4096;
buffer.posn = 0;
buffer.buffer = data;
- if (get_verbosely)
+ if (walker->get_verbosely)
fprintf(stderr, "Getting alternates list for %s\n", base);
url = xmalloc(strlen(base) + 31);
/* Use a callback to process the result, since another request
   may fail and need to have alternates loaded before continuing */
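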
slot = get_active_slot();
slot->callback_func = process_alternates_response;
+ alt_req.walker = walker;
slot->callback_data = &alt_req;
curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer);
if (start_active_slot(slot))
run_active_slot(slot);
else
- got_alternates = -1;
+ cdata->got_alternates = -1;
free(data);
free(url);
}
-static int fetch_indices(struct alt_base *repo)
+static int fetch_indices(struct walker *walker, struct alt_base *repo)
{
unsigned char sha1[20];
char *url;
buffer.posn = 0;
buffer.buffer = data;
- if (get_verbosely)
+ if (walker->get_verbosely)
fprintf(stderr, "Getting pack list for %s\n", repo->base);
url = xmalloc(strlen(repo->base) + 21);
!prefixcmp(data + i, " pack-") &&
!prefixcmp(data + i + 46, ".pack\n")) {
get_sha1_hex(data + i + 6, sha1);
- setup_index(repo, sha1);
+ setup_index(walker, repo, sha1);
i += 51;
break;
}
return 0;
}
-static int fetch_pack(struct alt_base *repo, unsigned char *sha1)
+static int fetch_pack(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
{
char *url;
struct packed_git *target;
long prev_posn = 0;
char range[RANGE_HEADER_SIZE];
struct curl_slist *range_header = NULL;
+ struct walker_data *data = walker->data;
struct active_request_slot *slot;
struct slot_results results;
- if (fetch_indices(repo))
+ if (fetch_indices(walker, repo))
return -1;
target = find_sha1_pack(sha1, repo->packs);
if (!target)
return -1;
- if (get_verbosely) {
+ if (walker->get_verbosely) {
fprintf(stderr, "Getting pack %s\n",
sha1_to_hex(target->sha1));
fprintf(stderr, " which contains %s\n",
curl_easy_setopt(slot->curl, CURLOPT_FILE, packfile);
curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite);
curl_easy_setopt(slot->curl, CURLOPT_URL, url);
- curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header);
+ curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header);
slot->local = packfile;
/* If there is data present from a previous transfer attempt,
resume where it left off */
prev_posn = ftell(packfile);
if (prev_posn>0) {
- if (get_verbosely)
+ if (walker->get_verbosely)
fprintf(stderr,
"Resuming fetch of pack %s at byte %ld\n",
sha1_to_hex(target->sha1), prev_posn);
release_object_request(obj_req);
}
-static int fetch_object(struct alt_base *repo, unsigned char *sha1)
+static int fetch_object(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
{
char *hex = sha1_to_hex(sha1);
int ret = 0;
step_active_slots();
}
#else
- start_object_request(obj_req);
+ start_object_request(walker, obj_req);
#endif
while (obj_req->state == ACTIVE) {
obj_req->errorstr, obj_req->curl_result,
obj_req->http_code, hex);
} else if (obj_req->zret != Z_STREAM_END) {
- corrupt_object_found++;
+ walker->corrupt_object_found++;
ret = error("File %s (%s) corrupt", hex, obj_req->url);
} else if (hashcmp(obj_req->sha1, obj_req->real_sha1)) {
ret = error("File %s has bad hash", hex);
return ret;
}
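+/* Walker "fetch" hook: try the loose object first, then a pack from each alternate */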
-int fetch(unsigned char *sha1)
+static int fetch(struct walker *walker, unsigned char *sha1)
{
- struct alt_base *altbase = alt;
+ struct walker_data *data = walker->data;
+ struct alt_base *altbase = data->alt;
- if (!fetch_object(altbase, sha1))
+ if (!fetch_object(walker, altbase, sha1))
return 0;
while (altbase) {
- if (!fetch_pack(altbase, sha1))
+ if (!fetch_pack(walker, altbase, sha1))
return 0;
- fetch_alternates(alt->base);
+ fetch_alternates(walker, data->alt->base);
altbase = altbase->next;
}
return error("Unable to find %s under %s", sha1_to_hex(sha1),
- alt->base);
+ data->alt->base);
}
static inline int needs_quote(int ch)
return qref;
}
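+/* Walker "fetch_ref" hook: look up a remote ref and fill in its SHA-1 */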
-int fetch_ref(char *ref, unsigned char *sha1)
+static int fetch_ref(struct walker *walker, char *ref, unsigned char *sha1)
{
char *url;
char hex[42];
struct buffer buffer;
- const char *base = alt->base;
+ struct walker_data *data = walker->data;
+ const char *base = data->alt->base;
struct active_request_slot *slot;
struct slot_results results;
buffer.size = 41;
return 0;
}
-int main(int argc, const char **argv)
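+/* Walker "cleanup" hook: shut down the HTTP layer and free the Pragma header list */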
+static void cleanup(struct walker *walker)
+{
+ struct walker_data *data = walker->data;
+ http_cleanup();
+
+ curl_slist_free_all(data->no_pragma_header);
+}
+
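+/* Create a walker that fetches objects over HTTP from the given base URL */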
+struct walker *get_http_walker(const char *url)
{
- int commits;
- const char **write_ref = NULL;
- char **commit_id;
- const char *url;
char *s;
- int arg = 1;
- int rc = 0;
-
- setup_git_directory();
- git_config(git_default_config);
-
- while (arg < argc && argv[arg][0] == '-') {
- if (argv[arg][1] == 't') {
- get_tree = 1;
- } else if (argv[arg][1] == 'c') {
- get_history = 1;
- } else if (argv[arg][1] == 'a') {
- get_all = 1;
- get_tree = 1;
- get_history = 1;
- } else if (argv[arg][1] == 'v') {
- get_verbosely = 1;
- } else if (argv[arg][1] == 'w') {
- write_ref = &argv[arg + 1];
- arg++;
- } else if (!strcmp(argv[arg], "--recover")) {
- get_recover = 1;
- } else if (!strcmp(argv[arg], "--stdin")) {
- commits_on_stdin = 1;
- }
- arg++;
- }
- if (argc < arg + 2 - commits_on_stdin) {
- usage("git-http-fetch [-c] [-t] [-a] [-v] [--recover] [-w ref] [--stdin] commit-id url");
- return 1;
- }
- if (commits_on_stdin) {
- commits = pull_targets_stdin(&commit_id, &write_ref);
- } else {
- commit_id = (char **) &argv[arg++];
- commits = 1;
- }
- url = argv[arg];
+ struct walker_data *data = xmalloc(sizeof(struct walker_data));
+ struct walker *walker = xmalloc(sizeof(struct walker));
http_init();
- no_pragma_header = curl_slist_append(no_pragma_header, "Pragma:");
+ data->no_pragma_header = curl_slist_append(NULL, "Pragma:");
- alt = xmalloc(sizeof(*alt));
- alt->base = xmalloc(strlen(url) + 1);
- strcpy(alt->base, url);
- for (s = alt->base + strlen(alt->base) - 1; *s == '/'; --s)
+ data->alt = xmalloc(sizeof(*data->alt));
+ data->alt->base = xmalloc(strlen(url) + 1);
+ strcpy(data->alt->base, url);
+ for (s = data->alt->base + strlen(data->alt->base) - 1; *s == '/'; --s)
*s = 0;
- alt->got_indices = 0;
- alt->packs = NULL;
- alt->next = NULL;
-#ifdef USE_CURL_MULTI
- add_fill_function(NULL, fill_active_slot);
-#endif
+ data->alt->got_indices = 0;
+ data->alt->packs = NULL;
+ data->alt->next = NULL;
+ data->got_alternates = -1;
- if (pull(commits, commit_id, write_ref, url))
- rc = 1;
+ walker->corrupt_object_found = 0;
+ walker->fetch = fetch;
+ walker->fetch_ref = fetch_ref;
+ walker->prefetch = prefetch;
+ walker->cleanup = cleanup;
+ walker->data = data;
- http_cleanup();
-
- curl_slist_free_all(no_pragma_header);
-
- if (commits_on_stdin)
- pull_targets_free(commits, commit_id, write_ref);
+#ifdef USE_CURL_MULTI
+ add_fill_function(walker, (int (*)(void *)) fill_active_slot);
+#endif
- if (corrupt_object_found) {
- fprintf(stderr,
-"Some loose object were found to be corrupt, but they might be just\n"
-"a false '404 Not Found' error message sent with incorrect HTTP\n"
-"status code. Suggest running git-fsck.\n");
- }
- return rc;
+ return walker;
}
#include "cache.h"
-#include "fetch.h"
+#include "walker.h"
#include "commit.h"
#include "tree.h"
#include "tree-walk.h"
#include "refs.h"
#include "strbuf.h"
-int get_tree = 0;
-int get_history = 0;
-int get_all = 0;
-int get_verbosely = 0;
-int get_recover = 0;
static unsigned char current_commit_sha1[20];
-void pull_say(const char *fmt, const char *hex)
+void walker_say(struct walker *walker, const char *fmt, const char *hex)
{
- if (get_verbosely)
+ if (walker->get_verbosely)
fprintf(stderr, fmt, hex);
}
sha1_to_hex(current_commit_sha1));
}
-static int process(struct object *obj);
+static int process(struct walker *walker, struct object *obj);
-static int process_tree(struct tree *tree)
+static int process_tree(struct walker *walker, struct tree *tree)
{
struct tree_desc desc;
struct name_entry entry;
if (blob)
obj = &blob->object;
}
- if (!obj || process(obj))
+ if (!obj || process(walker, obj))
return -1;
}
free(tree->buffer);
static struct commit_list *complete = NULL;
-static int process_commit(struct commit *commit)
+static int process_commit(struct walker *walker, struct commit *commit)
{
if (parse_commit(commit))
return -1;
hashcpy(current_commit_sha1, commit->object.sha1);
- pull_say("walk %s\n", sha1_to_hex(commit->object.sha1));
+ walker_say(walker, "walk %s\n", sha1_to_hex(commit->object.sha1));
- if (get_tree) {
- if (process(&commit->tree->object))
+ if (walker->get_tree) {
+ if (process(walker, &commit->tree->object))
return -1;
- if (!get_all)
- get_tree = 0;
+ if (!walker->get_all)
+ walker->get_tree = 0;
}
- if (get_history) {
+ if (walker->get_history) {
struct commit_list *parents = commit->parents;
for (; parents; parents = parents->next) {
- if (process(&parents->item->object))
+ if (process(walker, &parents->item->object))
return -1;
}
}
return 0;
}
-static int process_tag(struct tag *tag)
+static int process_tag(struct walker *walker, struct tag *tag)
{
if (parse_tag(tag))
return -1;
- return process(tag->tagged);
+ return process(walker, tag->tagged);
}
static struct object_list *process_queue = NULL;
static struct object_list **process_queue_end = &process_queue;
-static int process_object(struct object *obj)
+static int process_object(struct walker *walker, struct object *obj)
{
if (obj->type == OBJ_COMMIT) {
- if (process_commit((struct commit *)obj))
+ if (process_commit(walker, (struct commit *)obj))
return -1;
return 0;
}
if (obj->type == OBJ_TREE) {
- if (process_tree((struct tree *)obj))
+ if (process_tree(walker, (struct tree *)obj))
return -1;
return 0;
}
return 0;
}
if (obj->type == OBJ_TAG) {
- if (process_tag((struct tag *)obj))
+ if (process_tag(walker, (struct tag *)obj))
return -1;
return 0;
}
typename(obj->type), sha1_to_hex(obj->sha1));
}
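+/* Queue an object for scanning, prefetching its data if we do not have it yet */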
-static int process(struct object *obj)
+static int process(struct walker *walker, struct object *obj)
{
if (obj->flags & SEEN)
return 0;
else {
if (obj->flags & COMPLETE)
return 0;
- prefetch(obj->sha1);
+ walker->prefetch(walker, obj->sha1);
}
object_list_insert(obj, process_queue_end);
return 0;
}
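+/* Drain the process queue, fetching any queued object whose data is still missing */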
-static int loop(void)
+static int loop(struct walker *walker)
{
struct object_list *elem;
/* If we are not scanning this object, we placed it in
 * the queue because we needed to fetch it first.
 */
if (! (obj->flags & TO_SCAN)) {
- if (fetch(obj->sha1)) {
+ if (walker->fetch(walker, obj->sha1)) {
report_missing(obj);
return -1;
}
}
if (!obj->type)
parse_object(obj->sha1);
- if (process_object(obj))
+ if (process_object(walker, obj))
return -1;
}
return 0;
}
-static int interpret_target(char *target, unsigned char *sha1)
+static int interpret_target(struct walker *walker, char *target, unsigned char *sha1)
{
if (!get_sha1_hex(target, sha1))
return 0;
if (!check_ref_format(target)) {
- if (!fetch_ref(target, sha1)) {
+ if (!walker->fetch_ref(walker, target, sha1)) {
return 0;
}
}
return 0;
}
-int pull_targets_stdin(char ***target, const char ***write_ref)
+int walker_targets_stdin(char ***target, const char ***write_ref)
{
int targets = 0, targets_alloc = 0;
struct strbuf buf;
return targets;
}
-void pull_targets_free(int targets, char **target, const char **write_ref)
+void walker_targets_free(int targets, char **target, const char **write_ref)
{
while (targets--) {
free(target[targets]);
}
}
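+/* Fetch every target and the objects it references, then update the requested refs */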
-int pull(int targets, char **target, const char **write_ref,
- const char *write_ref_log_details)
+int walker_fetch(struct walker *walker, int targets, char **target,
+ const char **write_ref, const char *write_ref_log_details)
{
struct ref_lock **lock = xcalloc(targets, sizeof(struct ref_lock *));
unsigned char *sha1 = xmalloc(targets * 20);
}
}
- if (!get_recover)
+ if (!walker->get_recover)
for_each_ref(mark_complete, NULL);
for (i = 0; i < targets; i++) {
- if (interpret_target(target[i], &sha1[20 * i])) {
+ if (interpret_target(walker, target[i], &sha1[20 * i])) {
error("Could not interpret %s as something to pull", target[i]);
goto unlock_and_fail;
}
- if (process(lookup_unknown_object(&sha1[20 * i])))
+ if (process(walker, lookup_unknown_object(&sha1[20 * i])))
goto unlock_and_fail;
}
- if (loop())
+ if (loop(walker))
goto unlock_and_fail;
if (write_ref_log_details) {
return 0;
-
unlock_and_fail:
for (i = 0; i < targets; i++)
if (lock[i])
unlock_ref(lock[i]);
+
return -1;
}
+
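+/* Run the walker's cleanup hook and free the walker itself */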
+void walker_free(struct walker *walker)
+{
+ walker->cleanup(walker);
+ free(walker);
+}