Merge branches 'jc/rev-list' and 'jc/pack-thin'
author Junio C Hamano <junkio@cox.net>
Sat, 25 Feb 2006 05:55:23 +0000 (21:55 -0800)
committer Junio C Hamano <junkio@cox.net>
Sat, 25 Feb 2006 05:55:23 +0000 (21:55 -0800)
* jc/rev-list:
  rev-list --objects: use full pathname to help hashing.
  rev-list --objects-edge: remove duplicated edge commit output.
  rev-list --objects-edge

* jc/pack-thin:
  pack-objects: hash basename and dirname a bit differently.
  pack-objects: allow "thin" packs to exceed depth limits
  pack-objects: use full pathname to help hashing with "thin" pack.
  pack-objects: thin pack micro-optimization.
  Use thin pack transfer in "git fetch".
  Add git-push --thin.
  send-pack --thin: use "thin pack" delta transfer.
  Thin pack - create packfile with missing delta base.

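The jc/pack-thin series teaches the pack machinery to produce "thin" packs:
packs whose deltas may be based on objects deliberately left out of the pack,
because the receiving end is known to have them already and can resolve the
deltas against its own copies.  A hedged usage sketch (the remote name and
refspec are placeholders; only the --thin flag itself comes from the commits
above):

    # explicitly request the thin-pack transfer when pushing
    git-push --thin origin master

    # the fetch side was converted as well ("Use thin pack transfer in
    # 'git fetch'" above), so a plain fetch uses it where supported
    git-fetch origin
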
Conflicts:

pack-objects.c (taking "next")
send-pack.c (taking "next")

git-fetch.sh
pack-objects.c
rev-list.c
send-pack.c
upload-pack.c

diff --cc git-fetch.sh
Simple merge
diff --cc pack-objects.c
index 8f352aa6c1c99e7bebc7680909c9139c7a6cf623,c5a5e61605b7aefde9fd3cc8c43a085156671bc9,095bcb82860888991ef7d3b5f81e4e022bcbdfff..be7a2008c52f582626dd18b47372a4765c8bd39c
   #include "delta.h"
   #include "pack.h"
   #include "csum-file.h"
++ #include "diff.h"
   #include <sys/time.h>
 ++#include <signal.h>
   
 - static const char pack_usage[] = "git-pack-objects [-q] [--non-empty] [--local] [--incremental] [--window=N] [--depth=N] {--stdout | base-name} < object-list";
 + static const char pack_usage[] = "git-pack-objects [-q] [--no-reuse-delta] [--non-empty] [--local] [--incremental] [--window=N] [--depth=N] {--stdout | base-name} < object-list";
   
   struct object_entry {
        unsigned char sha1[20];
 -      unsigned long size;
 -      unsigned long offset;
 -      unsigned int depth;
 -      unsigned int hash;
 +      unsigned long size;     /* uncompressed size */
 +      unsigned long offset;   /* offset into the final pack file;
 +                               * nonzero if already written.
 +                               */
 +      unsigned int depth;     /* delta depth */
 +      unsigned int delta_limit;       /* base adjustment for in-pack delta */
 +      unsigned int hash;      /* name hint hash */
        enum object_type type;
 -      unsigned long delta_size;
 -      struct object_entry *delta;
 +      enum object_type in_pack_type;  /* could be delta */
 +      unsigned long delta_size;       /* delta data size (uncompressed) */
 +      struct object_entry *delta;     /* delta base object */
 +      struct packed_git *in_pack;     /* already in pack */
 +      unsigned int in_pack_offset;
 +      struct object_entry *delta_child; /* deltified objects that use me as base */
 +      struct object_entry *delta_sibling; /* other deltified objects that
 +                                           * use the same base as me
 +                                           */
++      int preferred_base;     /* we do not pack this, but it is encouraged to
++                               * be used as the base object to delta huge
++                               * objects against.
++                               */
++      int based_on_preferred; /* current delta candidate is a preferred
++                               * one, or delta against a preferred one.
++                               */
   };
   
 + /*
 +  * Objects we are going to pack are collected in the objects array
 +  * (dynamically expanded).  nr_objects & nr_alloc control this array.
 +  * They are stored in the order we see them -- typically rev-list --objects order that gives us
 +  * nice "minimum seek" order.
 +  *
 +  * sorted-by-sha and sorted-by-type are arrays of pointers that point at
 +  * elements in the objects array.  The former is used to build the pack
 +  * index (lists object names in the ascending order to help offset lookup),
 +  * and the latter is used to group similar things together by try_delta()
 +  * heuristics.
 +  */
 + 
   static unsigned char object_list_sha1[20];
   static int non_empty = 0;
 + static int no_reuse_delta = 0;
   static int local = 0;
   static int incremental = 0;
   static struct object_entry **sorted_by_sha, **sorted_by_type;
@@@@ -53,138 -28,6 -60,137 +61,138 @@@@ static int nr_objects = 0, nr_alloc = 0
   static const char *base_name;
   static unsigned char pack_file_sha1[20];
   static int progress = 1;
 ++static volatile int progress_update = 0;
 + 
 + /*
 +  * The object names in the objects array are hashed with this hashtable,
 +  * to help look up an entry by object name.  Binary search from
 +  * sorted_by_sha is also possible but this was easier to code and faster.
 +  * This hashtable is built after all the objects are seen.
 +  */
 + static int *object_ix = NULL;
 + static int object_ix_hashsz = 0;
 + 
 + /*
 +  * Pack index for existing packs gives us easy access to the offsets into
 +  * corresponding pack file where each object's data starts, but the entries
 +  * do not store the size of the compressed representation (uncompressed
 +  * size is easily available by examining the pack entry header).  We build
 +  * a hashtable of existing packs (pack_revindex), and keep reverse index
 +  * here -- pack index file is sorted by object name mapping to offset; this
 +  * pack_revindex[].revindex array is an ordered list of offsets, so if you
 +  * know the offset of an object, the next offset is where its packed
 +  * representation ends.
 +  */
 + struct pack_revindex {
 +      struct packed_git *p;
 +      unsigned long *revindex;
 + } *pack_revindex = NULL;
 + static int pack_revindex_hashsz = 0;
 + 
 + /*
 +  * stats
 +  */
 + static int written = 0;
 + static int written_delta = 0;
 + static int reused = 0;
 + static int reused_delta = 0;
 + 
 + static int pack_revindex_ix(struct packed_git *p)
 + {
 +      unsigned int ui = (unsigned int) p;
 +      int i;
 + 
 +      ui = ui ^ (ui >> 16); /* defeat structure alignment */
 +      i = (int)(ui % pack_revindex_hashsz);
 +      while (pack_revindex[i].p) {
 +              if (pack_revindex[i].p == p)
 +                      return i;
 +              if (++i == pack_revindex_hashsz)
 +                      i = 0;
 +      }
 +      return -1 - i;
 + }
 + 
 + static void prepare_pack_ix(void)
 + {
 +      int num;
 +      struct packed_git *p;
 +      for (num = 0, p = packed_git; p; p = p->next)
 +              num++;
 +      if (!num)
 +              return;
 +      pack_revindex_hashsz = num * 11;
 +      pack_revindex = xcalloc(sizeof(*pack_revindex), pack_revindex_hashsz);
 +      for (p = packed_git; p; p = p->next) {
 +              num = pack_revindex_ix(p);
 +              num = - 1 - num;
 +              pack_revindex[num].p = p;
 +      }
 +      /* revindex elements are lazily initialized */
 + }
 + 
 + static int cmp_offset(const void *a_, const void *b_)
 + {
 +      unsigned long a = *(unsigned long *) a_;
 +      unsigned long b = *(unsigned long *) b_;
 +      if (a < b)
 +              return -1;
 +      else if (a == b)
 +              return 0;
 +      else
 +              return 1;
 + }
 + 
 + /*
 +  * Ordered list of offsets of objects in the pack.
 +  */
 + static void prepare_pack_revindex(struct pack_revindex *rix)
 + {
 +      struct packed_git *p = rix->p;
 +      int num_ent = num_packed_objects(p);
 +      int i;
 +      void *index = p->index_base + 256;
 + 
 +      rix->revindex = xmalloc(sizeof(unsigned long) * (num_ent + 1));
 +      for (i = 0; i < num_ent; i++) {
 +              long hl = *((long *)(index + 24 * i));
 +              rix->revindex[i] = ntohl(hl);
 +      }
 +      /* This knows the pack format -- the 20-byte trailer
 +       * follows immediately after the last object data.
 +       */
 +      rix->revindex[num_ent] = p->pack_size - 20;
 +      qsort(rix->revindex, num_ent, sizeof(unsigned long), cmp_offset);
 + }
 + 
 + static unsigned long find_packed_object_size(struct packed_git *p,
 +                                           unsigned long ofs)
 + {
 +      int num;
 +      int lo, hi;
 +      struct pack_revindex *rix;
 +      unsigned long *revindex;
 +      num = pack_revindex_ix(p);
 +      if (num < 0)
 +              die("internal error: pack revindex uninitialized");
 +      rix = &pack_revindex[num];
 +      if (!rix->revindex)
 +              prepare_pack_revindex(rix);
 +      revindex = rix->revindex;
 +      lo = 0;
 +      hi = num_packed_objects(p) + 1;
 +      do {
 +              int mi = (lo + hi) / 2;
 +              if (revindex[mi] == ofs) {
 +                      return revindex[mi+1] - ofs;
 +              }
 +              else if (ofs < revindex[mi])
 +                      hi = mi;
 +              else
 +                      lo = mi + 1;
 +      } while (lo < hi);
 +      die("internal error: pack revindex corrupt");
 + }
   
   static void *delta_against(void *buf, unsigned long size, struct object_entry *entry)
   {
@@@@ -239,65 -82,31 -246,68 +248,68 @@@@ static unsigned long write_object(struc
        unsigned char header[10];
        unsigned hdrlen, datalen;
        enum object_type obj_type;
 +      int to_reuse = 0;
   
 -      if (!buf)
 -              die("unable to read %s", sha1_to_hex(entry->sha1));
 -      if (size != entry->size)
 -              die("object %s size inconsistency (%lu vs %lu)", sha1_to_hex(entry->sha1), size, entry->size);
++      if (entry->preferred_base)
++              return 0;
+  
 -      /*
 -       * The object header is a byte of 'type' followed by zero or
 -       * more bytes of length.  For deltas, the 20 bytes of delta sha1
 -       * follows that.
 -       */
        obj_type = entry->type;
 -      if (entry->delta) {
 -              buf = delta_against(buf, size, entry);
 -              size = entry->delta_size;
 -              obj_type = OBJ_DELTA;
 -      }
 -      hdrlen = encode_header(obj_type, size, header);
 -      sha1write(f, header, hdrlen);
 -      if (entry->delta) {
 -              sha1write(f, entry->delta, 20);
 -              hdrlen += 20;
 -      }
 -      datalen = sha1write_compressed(f, buf, size);
 -      free(buf);
 +      if (! entry->in_pack)
 +              to_reuse = 0;   /* can't reuse what we don't have */
 +      else if (obj_type == OBJ_DELTA)
 +              to_reuse = 1;   /* check_object() decided it for us */
 +      else if (obj_type != entry->in_pack_type)
 +              to_reuse = 0;   /* pack has delta which is unusable */
 +      else if (entry->delta)
 +              to_reuse = 0;   /* we want to pack afresh */
 +      else
 +              to_reuse = 1;   /* we have it in-pack undeltified,
 +                               * and we do not need to deltify it.
 +                               */
 + 
 +      if (! to_reuse) {
 +              buf = read_sha1_file(entry->sha1, type, &size);
 +              if (!buf)
 +                      die("unable to read %s", sha1_to_hex(entry->sha1));
 +              if (size != entry->size)
 +                      die("object %s size inconsistency (%lu vs %lu)",
 +                          sha1_to_hex(entry->sha1), size, entry->size);
 +              if (entry->delta) {
 +                      buf = delta_against(buf, size, entry);
 +                      size = entry->delta_size;
 +                      obj_type = OBJ_DELTA;
 +              }
 +              /*
 +               * The object header is a byte of 'type' followed by zero or
 +               * more bytes of length.  For deltas, the 20 bytes of delta
 +               * sha1 follows that.
 +               */
 +              hdrlen = encode_header(obj_type, size, header);
 +              sha1write(f, header, hdrlen);
 + 
 +              if (entry->delta) {
 +                      sha1write(f, entry->delta, 20);
 +                      hdrlen += 20;
 +              }
 +              datalen = sha1write_compressed(f, buf, size);
 +              free(buf);
 +      }
 +      else {
 +              struct packed_git *p = entry->in_pack;
 +              use_packed_git(p);
 + 
 +              datalen = find_packed_object_size(p, entry->in_pack_offset);
 +              buf = p->pack_base + entry->in_pack_offset;
 +              sha1write(f, buf, datalen);
 +              unuse_packed_git(p);
 +              hdrlen = 0; /* not really */
 +              if (obj_type == OBJ_DELTA)
 +                      reused_delta++;
 +              reused++;
 +      }
 +      if (obj_type == OBJ_DELTA)
 +              written_delta++;
 +      written++;
        return hdrlen + datalen;
   }
   
@@@@ -323,41 -132,24 -333,22 +335,43 @@@@ static void write_pack_file(void
        int i;
        struct sha1file *f;
        unsigned long offset;
 -      unsigned long mb;
        struct pack_header hdr;
 ++     unsigned last_percent = 999;
 ++     int do_progress = 0;
   
        if (!base_name)
                f = sha1fd(1, "<stdout>");
 --     else
 -              f = sha1create("%s-%s.%s", base_name, sha1_to_hex(object_list_sha1), "pack");
 ++     else {
 +              f = sha1create("%s-%s.%s", base_name,
 +                             sha1_to_hex(object_list_sha1), "pack");
 ++             do_progress = progress;
 ++     }
 ++     if (do_progress)
-               fprintf(stderr, "Writing %d objects.\n", nr_objects);
+++             fprintf(stderr, "Writing %d objects.\n", nr_result);
 ++
        hdr.hdr_signature = htonl(PACK_SIGNATURE);
        hdr.hdr_version = htonl(PACK_VERSION);
--      hdr.hdr_entries = htonl(nr_objects);
++      hdr.hdr_entries = htonl(nr_result);
        sha1write(f, &hdr, sizeof(hdr));
        offset = sizeof(hdr);
 --     for (i = 0; i < nr_objects; i++)
+++     if (!nr_result)
+++             goto done;
 ++     for (i = 0; i < nr_objects; i++) {
                offset = write_one(f, objects + i, offset);
 --
 ++             if (do_progress) {
-                       unsigned percent = written * 100 / nr_objects;
+++                     unsigned percent = written * 100 / nr_result;
 ++                     if (progress_update || percent != last_percent) {
 ++                             fprintf(stderr, "%4u%% (%u/%u) done\r",
-                                       percent, written, nr_objects);
+++                                     percent, written, nr_result);
 ++                             progress_update = 0;
 ++                             last_percent = percent;
 ++                     }
 ++             }
 ++     }
 ++     if (do_progress)
 ++             fputc('\n', stderr);
-  
+++ done:
        sha1close(f, pack_file_sha1, 1);
 -      mb = offset >> 20;
 -      offset &= 0xfffff;
   }
   
   static void write_index_file(void)
@@@@ -404,20 -196,18 -503,23 +526,23 @@@@ static int add_object_entry(const unsig
   {
        unsigned int idx = nr_objects;
        struct object_entry *entry;
-  
-       for (p = packed_git; p; p = p->next) {
-               struct pack_entry e;
-               if (find_pack_entry_one(sha1, &e, p)) {
-                       if (incremental)
-                               return 0;
-                       if (local && !p->pack_local)
-                               return 0;
-                       if (!found_pack) {
-                               found_offset = e.offset;
-                               found_pack = e.p;
 +      struct packed_git *p;
 +      unsigned int found_offset = 0;
 +      struct packed_git *found_pack = NULL;
 -      if (incremental || local) {
 -              struct packed_git *p;
 - 
++      int ix, status = 0;
+  
 - 
++      if (!exclude) {
+               for (p = packed_git; p; p = p->next) {
+                       struct pack_entry e;
+                       if (find_pack_entry_one(sha1, &e, p)) {
+                               if (incremental)
+                                       return 0;
+                               if (local && !p->pack_local)
+                                       return 0;
++                              if (!found_pack) {
++                                      found_offset = e.offset;
++                                      found_pack = e.p;
++                              }
                        }
                }
        }
        memset(entry, 0, sizeof(*entry));
        memcpy(entry->sha1, sha1, 20);
        entry->hash = hash;
-       if (found_pack) {
-               entry->in_pack = found_pack;
-               entry->in_pack_offset = found_offset;
 -      nr_objects = idx+1;
 -      return 1;
++ 
++      if (object_ix_hashsz * 3 <= nr_objects * 4)
++              rehash_objects();
++      else {
++              ix = locate_object_entry_hash(entry->sha1);
++              if (0 <= ix)
++                      die("internal error in object hashing.");
++              object_ix[-1 - ix] = idx + 1;
 +      }
-       nr_objects = idx+1;
-       return 1;
++      status = 1;
++ 
++  already_added:
+++     if (progress_update) {
+++             fprintf(stderr, "Counting objects...%d\r", nr_objects);
+++             progress_update = 0;
+++     }
++      if (exclude)
++              entry->preferred_base = 1;
++      else {
++              if (found_pack) {
++                      entry->in_pack = found_pack;
++                      entry->in_pack_offset = found_offset;
++              }
++      }
++      return status;
 + }
 + 
-  static int locate_object_entry_hash(unsigned char *sha1)
++ static void add_pbase_tree(struct tree_desc *tree, struct name_path *up)
 + {
-       int i;
-       unsigned int ui;
-       memcpy(&ui, sha1, sizeof(unsigned int));
-       i = ui % object_ix_hashsz;
-       while (0 < object_ix[i]) {
-               if (!memcmp(sha1, objects[object_ix[i]-1].sha1, 20))
-                       return i;
-               if (++i == object_ix_hashsz)
-                       i = 0;
++      while (tree->size) {
++              const unsigned char *sha1;
++              const char *name;
++              unsigned mode, hash;
++              unsigned long size;
++              char type[20];
++ 
++              sha1 = tree_entry_extract(tree, &name, &mode);
++              update_tree_entry(tree);
++              if (!has_sha1_file(sha1))
++                      continue;
++              if (sha1_object_info(sha1, type, &size))
++                      continue;
++ 
++              hash = name_hash(up, name);
++              if (!add_object_entry(sha1, hash, 1))
++                      continue;
++ 
++              if (!strcmp(type, "tree")) {
++                      struct tree_desc sub;
++                      void *elem;
++                      struct name_path me;
++ 
++                      elem = read_sha1_file(sha1, type, &sub.size);
++                      sub.buf = elem;
++                      if (sub.buf) {
++                              me.up = up;
++                              me.elem = name;
++                              me.len = strlen(name);
++                              add_pbase_tree(&sub, &me);
++                              free(elem);
++                      }
++              }
 +      }
-       return -1 - i;
 + }
 + 
-  static struct object_entry *locate_object_entry(unsigned char *sha1)
++ static void add_preferred_base(unsigned char *sha1)
 + {
-       int i = locate_object_entry_hash(sha1);
-       if (0 <= i)
-               return &objects[object_ix[i]-1];
-       return NULL;
++      struct tree_desc tree;
++      void *elem;
++ 
++      elem = read_object_with_reference(sha1, "tree", &tree.size, NULL);
++      tree.buf = elem;
++      if (!tree.buf)
++              return;
++      if (add_object_entry(sha1, name_hash(NULL, ""), 1))
++              add_pbase_tree(&tree, NULL);
++      free(elem);
   }
   
   static void check_object(struct object_entry *entry)
   {
        char type[20];
   
-       if (entry->in_pack) {
 -      if (!sha1_object_info(entry->sha1, type, &entry->size)) {
 -              if (!strcmp(type, "commit")) {
 -                      entry->type = OBJ_COMMIT;
 -              } else if (!strcmp(type, "tree")) {
 -                      entry->type = OBJ_TREE;
 -              } else if (!strcmp(type, "blob")) {
 -                      entry->type = OBJ_BLOB;
 -              } else if (!strcmp(type, "tag")) {
 -                      entry->type = OBJ_TAG;
 -              } else
 -                      die("unable to pack object %s of type %s",
 -                          sha1_to_hex(entry->sha1), type);
++      if (entry->in_pack && !entry->preferred_base) {
 +              unsigned char base[20];
 +              unsigned long size;
 +              struct object_entry *base_entry;
 + 
 +              /* We want in_pack_type even if we do not reuse delta.
 +               * There is no point not reusing non-delta representations.
 +               */
 +              check_reuse_pack_delta(entry->in_pack,
 +                                     entry->in_pack_offset,
 +                                     base, &size,
 +                                     &entry->in_pack_type);
 + 
 +              /* Check if it is delta, and the base is also an object
 +               * we are going to pack.  If so we will reuse the existing
 +               * delta.
 +               */
 +              if (!no_reuse_delta &&
 +                  entry->in_pack_type == OBJ_DELTA &&
-                   (base_entry = locate_object_entry(base))) {
++                  (base_entry = locate_object_entry(base)) &&
++                  (!base_entry->preferred_base)) {
 + 
 +                      /* Depth value does not matter - find_deltas()
 +                       * will never consider reused delta as the
 +                       * base object to deltify other objects
 +                       * against, in order to avoid circular deltas.
 +                       */
 + 
 +                      /* uncompressed size of the delta data */
 +                      entry->size = entry->delta_size = size;
 +                      entry->delta = base_entry;
 +                      entry->type = OBJ_DELTA;
 + 
 +                      entry->delta_sibling = base_entry->delta_child;
 +                      base_entry->delta_child = entry;
 + 
 +                      return;
 +              }
 +              /* Otherwise we would do the usual */
        }
 -      else
 + 
 +      if (sha1_object_info(entry->sha1, type, &entry->size))
                die("unable to get type of object %s",
                    sha1_to_hex(entry->sha1));
-  static void hash_objects(void)
-  {
-       int i;
-       struct object_entry *oe;
-  
-       object_ix_hashsz = nr_objects * 2;
-       object_ix = xcalloc(sizeof(int), object_ix_hashsz);
-       for (i = 0, oe = objects; i < nr_objects; i++, oe++) {
-               int ix = locate_object_entry_hash(oe->sha1);
-               if (0 <= ix) {
-                       error("the same object '%s' added twice",
-                             sha1_to_hex(oe->sha1));
-                       continue;
-               }
-               ix = -1 - ix;
-               object_ix[ix] = i + 1;
-       }
-  }
-  
 + 
 +      if (!strcmp(type, "commit")) {
 +              entry->type = OBJ_COMMIT;
 +      } else if (!strcmp(type, "tree")) {
 +              entry->type = OBJ_TREE;
 +      } else if (!strcmp(type, "blob")) {
 +              entry->type = OBJ_BLOB;
 +      } else if (!strcmp(type, "tag")) {
 +              entry->type = OBJ_TAG;
 +      } else
 +              die("unable to pack object %s of type %s",
 +                  sha1_to_hex(entry->sha1), type);
 + }
 + 
 + static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
 + {
 +      struct object_entry *child = me->delta_child;
 +      unsigned int m = n;
 +      while (child) {
 +              unsigned int c = check_delta_limit(child, n + 1);
 +              if (m < c)
 +                      m = c;
 +              child = child->delta_sibling;
 +      }
 +      return m;
   }
   
   static void get_object_details(void)
   {
        int i;
 -      struct object_entry *entry = objects;
 +      struct object_entry *entry;
   
-       hash_objects();
 -      for (i = 0; i < nr_objects; i++)
 -              check_object(entry++);
 +      prepare_pack_ix();
 +      for (i = 0, entry = objects; i < nr_objects; i++, entry++)
 +              check_object(entry);
-       for (i = 0, entry = objects; i < nr_objects; i++, entry++)
-               if (!entry->delta && entry->delta_child)
-                       entry->delta_limit =
-                               check_delta_limit(entry, 1);
++ 
++      if (nr_objects == nr_result) {
++              /*
++               * Depth of objects that depend on the entry -- this
++               * is subtracted from depth-max to keep the delta
++               * chain from becoming too deep when reusing delta data.
++               * However, we loosen this restriction when we know we
++               * are creating a thin pack -- it will have to be
++               * expanded on the other end anyway, so do not
++               * artificially cut the delta chain and let it go as
++               * deep as it wants.
++               */
++              for (i = 0, entry = objects; i < nr_objects; i++, entry++)
++                      if (!entry->delta && entry->delta_child)
++                              entry->delta_limit =
++                                      check_delta_limit(entry, 1);
++      }
   }
   
   typedef int (*entry_sort_t)(const struct object_entry *, const struct object_entry *);
@@@@ -640,22 -326,12 -807,28 +834,27 @@@@ static int try_delta(struct unpacked *c
        if (cur_entry->type != old_entry->type)
                return -1;
   
-       /* If the current object is at edge, take the depth the objects
-        * that depend on the current object into account -- otherwise
-        * they would become too deep.
 -      size = cur_entry->size;
 -      if (size < 50)
++      /* We do not compute delta to *create* objects we are not
++       * going to pack.
++       */
++      if (cur_entry->preferred_base)
+               return -1;
++ 
++      /* If the current object is at pack edge, take the depth the
++       * objects that depend on the current object into account --
++       * otherwise they would become too deep.
 +       */
 +      if (cur_entry->delta_child) {
 +              if (max_depth <= cur_entry->delta_limit)
 +                      return 0;
 +              max_depth -= cur_entry->delta_limit;
 +      }
 + 
 +      size = cur_entry->size;
- -     if (size < 50)
- -             return -1;
        oldsize = old_entry->size;
        sizediff = oldsize > size ? oldsize - size : size - oldsize;
---     if (sizediff > size / 8)
+++
+++     if (size < 50)
                return -1;
        if (old_entry->depth >= max_depth)
                return 0;
@@@@ -700,8 -369,7 -886,7 +919,8 @@@@ static void find_deltas(struct object_e
        memset(array, 0, array_size);
        i = nr_objects;
        idx = 0;
 --     eye_candy = i - (nr_objects / 20);
 ++     if (progress)
-               fprintf(stderr, "Deltifying %d objects.\n", nr_objects);
+++             fprintf(stderr, "Deltifying %d objects.\n", nr_result);
   
        while (--i >= 0) {
                struct object_entry *entry = list[i];
                char type[10];
                int j;
   
-               processed++;
 --             if (progress && i <= eye_candy) {
 --                     eye_candy -= nr_objects / 20;
 --                     fputc('.', stderr);
+++             if (!entry->preferred_base)
+++                     processed++;
+++
 ++             if (progress) {
-                       unsigned percent = processed * 100 / nr_objects;
+++                     unsigned percent = processed * 100 / nr_result;
 ++                     if (percent != last_percent || progress_update) {
 ++                             fprintf(stderr, "%4u%% (%u/%u) done\r",
-                                       percent, processed, nr_objects);
+++                                     percent, processed, nr_result);
 ++                             progress_update = 0;
 ++                             last_percent = percent;
 ++                     }
                }
 + 
 +              if (entry->delta)
 +                      /* This happens if we decided to reuse existing
 +                       * delta from a pack.  "!no_reuse_delta &&" is implied.
 +                       */
 +                      continue;
 + 
                free(n->data);
                n->entry = entry;
                n->data = read_sha1_file(entry->sha1, type, &size);
   
   static void prepare_pack(int window, int depth)
   {
  -     if (progress)
  -             fprintf(stderr, "Packing %d objects", nr_result);
        get_object_details();
 - 
 --     if (progress)
 -              fprintf(stderr, "Packing %d objects", nr_objects);
  -             fputc('.', stderr);
  -
        sorted_by_type = create_sorted_list(type_size_sort);
        if (window && depth)
                find_deltas(sorted_by_type, window+1, depth);
@@@@ -883,25 -539,30 -1071,35 +1104,26 @@@@ int main(int argc, char **argv
                usage(pack_usage);
   
        prepare_packed_git();
 ++
        if (progress) {
 ++             struct itimerval v;
 ++             v.it_interval.tv_sec = 1;
 ++             v.it_interval.tv_usec = 0;
 ++             v.it_value = v.it_interval;
 ++             signal(SIGALRM, progress_interval);
 ++             setitimer(ITIMER_REAL, &v, NULL);
                fprintf(stderr, "Generating pack...\n");
 --             gettimeofday(&prev_tv, NULL);
        }
 ++
        while (fgets(line, sizeof(line), stdin) != NULL) {
--              unsigned int hash;
--              char *p;
                unsigned char sha1[20];
   
-               if (progress_update) {
 --             if (progress && (eye_candy <= nr_objects)) {
---                     fprintf(stderr, "Counting objects...%d\r", nr_objects);
-                       progress_update = 0;
 --                     if (eye_candy && (50 <= eye_candy_incr)) {
 --                             struct timeval tv;
 --                             int time_diff;
 --                             gettimeofday(&tv, NULL);
 --                             time_diff = (tv.tv_sec - prev_tv.tv_sec);
 --                             time_diff <<= 10;
 --                             time_diff += (tv.tv_usec - prev_tv.tv_usec);
 --                             if ((1 << 9) < time_diff)
 --                                     eye_candy_incr += 50;
 --                             else if (50 < eye_candy_incr)
 --                                     eye_candy_incr -= 50;
 --                     }
 --                     eye_candy += eye_candy_incr;
  -             }
++              if (line[0] == '-') {
++                      if (get_sha1_hex(line+1, sha1))
++                              die("expected edge sha1, got garbage:\n %s",
++                                  line+1);
++                      add_preferred_base(sha1);
++                      continue;
                }
                if (get_sha1_hex(line, sha1))
                        die("expected sha1, got garbage:\n %s", line);
        }
        if (progress)
                fprintf(stderr, "Done counting %d objects.\n", nr_objects);
---     if (non_empty && !nr_objects)
+++     sorted_by_sha = create_final_object_list();
+++     if (non_empty && !nr_result)
                return 0;
   
--      sorted_by_sha = create_sorted_list(sha1_sort);
  -     sorted_by_sha = create_final_object_list();
        SHA1_Init(&ctx);
        list = sorted_by_sha;
--      for (i = 0; i < nr_objects; i++) {
++      for (i = 0; i < nr_result; i++) {
                struct object_entry *entry = *list++;
                SHA1_Update(&ctx, entry->sha1, 20);
        }
        if (reuse_cached_pack(object_list_sha1, pack_to_stdout))
                ;
        else {
---             prepare_pack(window, depth);
+++             if (nr_result)
+++                     prepare_pack(window, depth);
 ++             if (progress && pack_to_stdout) {
 ++                     /* the other end usually displays progress itself */
 ++                     struct itimerval v = {{0,},};
 ++                     setitimer(ITIMER_REAL, &v, NULL);
 ++                     signal(SIGALRM, SIG_IGN );
 ++                     progress_update = 0;
 ++             }
 ++             write_pack_file();
                if (!pack_to_stdout) {
                        write_index_file();
                        puts(sha1_to_hex(object_list_sha1));
                }
        }
-                       nr_objects, written, written_delta, reused, reused_delta);
 +      if (progress)
 +              fprintf(stderr, "Total %d, written %d (delta %d), reused %d (delta %d)\n",
++                      nr_result, written, written_delta, reused, reused_delta);
        return 0;
   }
diff --cc rev-list.c
index ee5f15ae4e21bcf931db034ad7c02bed76d18fa3,dda6fcaa9694c332432da03e5b1ec0d0b46f0f0b,63391fc1136d4d4045db6406face4c53915cd5bf..67d2a483fce79e23433ef21f209e55f7ff7a171d
@@@@ -27,10 -27,10 -27,9 +27,10 @@@@ static const char rev_list_usage[] 
   "  ordering output:\n"
   "    --merge-order [ --show-breaks ]\n"
   "    --topo-order\n"
  +"    --date-order\n"
   "  formatting output:\n"
   "    --parents\n"
- -"    --objects\n"
+ +"    --objects | --objects-edge\n"
   "    --unpacked\n"
   "    --header | --pretty\n"
   "    --abbrev=nr | --no-abbrev\n"
diff --cc send-pack.c
index b58bbabc15c7619622d7796fc0c7d69b7dcf7e9b,990be3f1a338a34025afa4acbe25f4c67ea6ce30,ad22da56e9d43b6dc36a1f9542d07e4b3da459ef..f5583861433517527943dcefb3320e923fe634a0
@@@@ -37,44 -37,26 -38,29 +38,47 @@@@ static void exec_pack_objects(void
   
   static void exec_rev_list(struct ref *refs)
   {
 ++     struct ref *ref;
        static char *args[1000];
 --     int i = 0;
 ++     int i = 0, j;
   
        args[i++] = "rev-list"; /* 0 */
--      args[i++] = "--objects";        /* 1 */
 -      while (refs) {
 -              char *buf = malloc(100);
 -              if (i > 900)
++      if (use_thin_pack)      /* 1 */
++              args[i++] = "--objects-edge";
++      else
++              args[i++] = "--objects";
  -     while (refs) {
  -             char *buf = malloc(100);
  -             if (i > 900)
 ++
 ++     /* First send the ones we care about most */
 ++     for (ref = refs; ref; ref = ref->next) {
 ++             if (900 < i)
                        die("git-rev-list environment overflow");
 --             if (!is_zero_sha1(refs->old_sha1) &&
 --                 has_sha1_file(refs->old_sha1)) {
 ++             if (!is_zero_sha1(ref->new_sha1)) {
 ++                     char *buf = malloc(100);
                        args[i++] = buf;
 --                     snprintf(buf, 50, "^%s", sha1_to_hex(refs->old_sha1));
 ++                     snprintf(buf, 50, "%s", sha1_to_hex(ref->new_sha1));
                        buf += 50;
 ++                     if (!is_zero_sha1(ref->old_sha1) &&
 ++                         has_sha1_file(ref->old_sha1)) {
 ++                             args[i++] = buf;
 ++                             snprintf(buf, 50, "^%s",
 ++                                      sha1_to_hex(ref->old_sha1));
 ++                     }
                }
 --             if (!is_zero_sha1(refs->new_sha1)) {
 ++     }
 ++
 ++     /* Then a handful of the remainder
 ++      * NEEDSWORK: we would be better off if we used the newer ones first.
 ++      */
 ++     for (ref = refs, j = i + 16;
 ++          i < 900 && i < j && ref;
 ++          ref = ref->next) {
 ++             if (is_zero_sha1(ref->new_sha1) &&
 ++                 !is_zero_sha1(ref->old_sha1) &&
 ++                 has_sha1_file(ref->old_sha1)) {
 ++                     char *buf = malloc(42);
                        args[i++] = buf;
 --                     snprintf(buf, 50, "%s", sha1_to_hex(refs->new_sha1));
 ++                     snprintf(buf, 42, "^%s", sha1_to_hex(ref->old_sha1));
                }
 --             refs = refs->next;
        }
        args[i] = NULL;
        execv_git_cmd(args);
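
With use_thin_pack set, exec_rev_list() above requests --objects-edge and
lists the refs being pushed first, each followed by its negated old value
when the sender has that object, then up to sixteen more negated refs from
the remainder.  A hedged illustration of the resulting invocation for a
single updated branch (the sha1s are placeholders):

    git-rev-list --objects-edge <new-sha1> ^<old-sha1>
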
diff --cc upload-pack.c
index 3606529f61c50aa1c4d73877ccc4601ef036fcb5,3606529f61c50aa1c4d73877ccc4601ef036fcb5,3cdf4288b838c045b4524c7dd906525a39f0e67d..635abb371d4c86677f091551f731047b4ca08612
@@@@ -213,12 -213,12 -218,9 +218,12 @@@@ static int receive_needs(void
   
   static int send_ref(const char *refname, const unsigned char *sha1)
   {
--      static char *capabilities = "multi_ack";
++      static char *capabilities = "multi_ack thin-pack";
        struct object *o = parse_object(sha1);
   
  +     if (!o)
  +             die("git-upload-pack: cannot find object %s:", sha1_to_hex(sha1));
  +
        if (capabilities)
                packet_write(1, "%s %s%c%s\n", sha1_to_hex(sha1), refname,
                        0, capabilities);
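
On the serving side, upload-pack now advertises the thin-pack capability
alongside multi_ack, which is how a client learns it may ask for the thin
transfer.  A hedged illustration of the first advertised ref after this
change (pkt-line framing omitted, the NUL written by packet_write() shown
as \0; the sha1 and refname are placeholders):

    <sha1> refs/heads/master\0multi_ack thin-pack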