Merge branch 'jk/loose-object-cache-oid'

Code clean-up: convert more loose-object helpers from bare SHA-1 buffers to "struct object_id", and replace "sha1" with hash-agnostic wording in messages and comments.

* jk/loose-object-cache-oid:
  prefer "hash mismatch" to "sha1 mismatch"
  sha1-file: avoid "sha1 file" for generic use in messages
  sha1-file: prefer "loose object file" to "sha1 file" in messages
  sha1-file: drop has_sha1_file()
  convert has_sha1_file() callers to has_object_file()
  sha1-file: convert pass-through functions to object_id
  sha1-file: modernize loose header/stream functions
  sha1-file: modernize loose object file functions
  http: use struct object_id instead of bare sha1
  update comment references to sha1_object_info()
  sha1-file: fix outdated sha1 comment references
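The series converts callers from the old bare-pointer helpers (e.g. has_sha1_file(oid.hash)) to the object_id-based ones (e.g. has_object_file(&oid)), as the hunks below show. For readers unfamiliar with the calling-convention change, here is a minimal standalone sketch of the before/after pattern; the struct and helpers are simplified stand-ins for illustration only, not Git's actual implementation:

```c
#include <stdio.h>
#include <string.h>

#define GIT_MAX_RAWSZ 32            /* large enough for SHA-256 */

/* Simplified stand-in for Git's "struct object_id". */
struct object_id {
	unsigned char hash[GIT_MAX_RAWSZ];
};

/* Old style: callers peel out and pass the bare hash buffer. */
static int has_sha1_file(const unsigned char *sha1)
{
	return sha1[0] != 0;        /* placeholder for a real lookup */
}

/* New style: callers hand over the whole object_id. */
static int has_object_file(const struct object_id *oid)
{
	return oid->hash[0] != 0;   /* placeholder for a real lookup */
}

int main(void)
{
	struct object_id oid;

	memset(&oid, 0, sizeof(oid));
	oid.hash[0] = 0x63;

	/* Before the series: call sites passed oid.hash directly. */
	printf("old: %d\n", has_sha1_file(oid.hash));

	/* After the series: call sites pass &oid, keeping the type intact. */
	printf("new: %d\n", has_object_file(&oid));
	return 0;
}
```

Keeping the full object_id at the call sites avoids lossy conversions to raw byte arrays and prepares the code for hash algorithms other than SHA-1.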
diff --git a/Documentation/git-fsck.txt b/Documentation/git-fsck.txt
index ab9a93f..55950d9 100644
--- a/Documentation/git-fsck.txt
+++ b/Documentation/git-fsck.txt
@@ -140,9 +140,9 @@
 	The <type> object <object>, is present in the database but never
 	'directly' used. A dangling commit could be a root node.
 
-sha1 mismatch <object>::
-	The database has an object who's sha1 doesn't match the
-	database value.
+hash mismatch <object>::
+	The database has an object whose hash doesn't match the
+	object database value.
 	This indicates a serious data integrity problem.
 
 Environment Variables
diff --git a/apply.c b/apply.c
index beed309..892ede5 100644
--- a/apply.c
+++ b/apply.c
@@ -3182,7 +3182,7 @@
 		return 0; /* deletion patch */
 	}
 
-	if (has_sha1_file(oid.hash)) {
+	if (has_object_file(&oid)) {
 		/* We already have the postimage */
 		enum object_type type;
 		unsigned long size;
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index 8487cd7..0f09238 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -211,14 +211,14 @@
 
 	/*
 	 * After a mark_query run, this object_info is set up to be
-	 * passed to sha1_object_info_extended. It will point to the data
+	 * passed to oid_object_info_extended. It will point to the data
 	 * elements above, so you can retrieve the response from there.
 	 */
 	struct object_info info;
 
 	/*
 	 * This flag will be true if the requested batch format and options
-	 * don't require us to call sha1_object_info, which can then be
+	 * don't require us to call oid_object_info, which can then be
 	 * optimized out.
 	 */
 	unsigned skip_object_info : 1;
@@ -496,7 +496,7 @@
 
 	/*
 	 * Expand once with our special mark_query flag, which will prime the
-	 * object_info to be handed to sha1_object_info_extended for each
+	 * object_info to be handed to oid_object_info_extended for each
 	 * object.
 	 */
 	memset(&data, 0, sizeof(data));
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 39c4210..5a09fe2 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -317,8 +317,7 @@
 			    !has_object_file_with_flags(&ref->old_oid,
 							OBJECT_INFO_QUICK) &&
 			    !will_fetch(head, ref->old_oid.hash) &&
-			    !has_sha1_file_with_flags(item->oid.hash,
-						      OBJECT_INFO_QUICK) &&
+			    !has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
 			    !will_fetch(head, item->oid.hash))
 				oidclr(&item->oid);
 			item = NULL;
@@ -332,7 +331,7 @@
 		 * fetch.
 		 */
 		if (item &&
-		    !has_sha1_file_with_flags(item->oid.hash, OBJECT_INFO_QUICK) &&
+		    !has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
 		    !will_fetch(head, item->oid.hash))
 			oidclr(&item->oid);
 
@@ -353,7 +352,7 @@
 	 * checked to see if it needs fetching.
 	 */
 	if (item &&
-	    !has_sha1_file_with_flags(item->oid.hash, OBJECT_INFO_QUICK) &&
+	    !has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
 	    !will_fetch(head, item->oid.hash))
 		oidclr(&item->oid);
 
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index ac1f4ea..31046c7 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -772,7 +772,7 @@
 	if (startup_info->have_repository) {
 		read_lock();
 		collision_test_needed =
-			has_sha1_file_with_flags(oid->hash, OBJECT_INFO_QUICK);
+			has_object_file_with_flags(oid, OBJECT_INFO_QUICK);
 		read_unlock();
 	}
 
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index bd67491..68e2e66 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -1643,7 +1643,7 @@
 
 		/*
 		 * No choice but to fall back to the recursive delta walk
-		 * with sha1_object_info() to find about the object type
+		 * with oid_object_info() to find about the object type
 		 * at this point...
 		 */
 		give_up:
@@ -1719,7 +1719,7 @@
 	if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
 		/*
 		 * We failed to get the info from this pack for some reason;
-		 * fall back to sha1_object_info, which may find another copy.
+		 * fall back to oid_object_info, which may find another copy.
 		 * And if that fails, the error will be recorded in oe_type(entry)
 		 * and dealt with in prepare_pack().
 		 */
diff --git a/builtin/reflog.c b/builtin/reflog.c
index 1f1010e..4d34309 100644
--- a/builtin/reflog.c
+++ b/builtin/reflog.c
@@ -94,7 +94,7 @@
 	init_tree_desc(&desc, tree->buffer, tree->size);
 	complete = 1;
 	while (tree_entry(&desc, &entry)) {
-		if (!has_sha1_file(entry.oid.hash) ||
+		if (!has_object_file(&entry.oid) ||
 		    (S_ISDIR(entry.mode) && !tree_is_complete(&entry.oid))) {
 			tree->object.flags |= INCOMPLETE;
 			complete = 0;
diff --git a/builtin/show-ref.c b/builtin/show-ref.c
index ed888ff..6a706c0 100644
--- a/builtin/show-ref.c
+++ b/builtin/show-ref.c
@@ -23,7 +23,7 @@
 	const char *hex;
 	struct object_id peeled;
 
-	if (!has_sha1_file(oid->hash))
+	if (!has_object_file(oid))
 		die("git show-ref: bad ref %s (%s)", refname,
 		    oid_to_hex(oid));
 
diff --git a/bulk-checkin.c b/bulk-checkin.c
index 409ecb5..39ee7d6 100644
--- a/bulk-checkin.c
+++ b/bulk-checkin.c
@@ -67,7 +67,7 @@
 	int i;
 
 	/* The object may already exist in the repository */
-	if (has_sha1_file(oid->hash))
+	if (has_object_file(oid))
 		return 1;
 
 	/* Might want to keep the list sorted */
diff --git a/cache-tree.c b/cache-tree.c
index c4b8a1f..b13bfaf 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -225,7 +225,7 @@
 	int i;
 	if (!it)
 		return 0;
-	if (it->entry_count < 0 || !has_sha1_file(it->oid.hash))
+	if (it->entry_count < 0 || !has_object_file(&it->oid))
 		return 0;
 	for (i = 0; i < it->subtree_nr; i++) {
 		if (!cache_tree_fully_valid(it->down[i]->cache_tree))
@@ -253,7 +253,7 @@
 
 	*skip_count = 0;
 
-	if (0 <= it->entry_count && has_sha1_file(it->oid.hash))
+	if (0 <= it->entry_count && has_object_file(&it->oid))
 		return it->entry_count;
 
 	/*
diff --git a/cache.h b/cache.h
index ef9f3c4..27fe635 100644
--- a/cache.h
+++ b/cache.h
@@ -1271,8 +1271,8 @@
 
 extern int git_open_cloexec(const char *name, int flags);
 #define git_open(name) git_open_cloexec(name, O_RDONLY)
-extern int unpack_sha1_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz);
-extern int parse_sha1_header(const char *hdr, unsigned long *sizep);
+extern int unpack_loose_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz);
+extern int parse_loose_header(const char *hdr, unsigned long *sizep);
 
 extern int check_object_signature(const struct object_id *oid, void *buf, unsigned long size, const char *type);
 
@@ -1619,7 +1619,7 @@
 extern int odb_pack_keep(const char *name);
 
 /*
- * Set this to 0 to prevent sha1_object_info_extended() from fetching missing
+ * Set this to 0 to prevent oid_object_info_extended() from fetching missing
  * blobs. This has a difference only if extensions.partialClone is set.
  *
  * Its default value is 1.
diff --git a/http-push.c b/http-push.c
index 77e2e22..b22c7ca 100644
--- a/http-push.c
+++ b/http-push.c
@@ -255,7 +255,7 @@
 	struct active_request_slot *slot;
 	struct http_object_request *obj_req;
 
-	obj_req = new_http_object_request(repo->url, request->obj->oid.hash);
+	obj_req = new_http_object_request(repo->url, &request->obj->oid);
 	if (obj_req == NULL) {
 		request->state = ABORTED;
 		return;
diff --git a/http-walker.c b/http-walker.c
index 0a392c8..8ae5d76 100644
--- a/http-walker.c
+++ b/http-walker.c
@@ -58,7 +58,7 @@
 	struct active_request_slot *slot;
 	struct http_object_request *req;
 
-	req = new_http_object_request(obj_req->repo->base, obj_req->oid.hash);
+	req = new_http_object_request(obj_req->repo->base, &obj_req->oid);
 	if (req == NULL) {
 		obj_req->state = ABORTED;
 		return;
@@ -131,7 +131,7 @@
 	list_for_each_safe(pos, tmp, head) {
 		obj_req = list_entry(pos, struct object_request, node);
 		if (obj_req->state == WAITING) {
-			if (has_sha1_file(obj_req->oid.hash))
+			if (has_object_file(&obj_req->oid))
 				obj_req->state = COMPLETE;
 			else {
 				start_object_request(walker, obj_req);
@@ -489,7 +489,7 @@
 	if (obj_req == NULL)
 		return error("Couldn't find request for %s in the queue", hex);
 
-	if (has_sha1_file(obj_req->oid.hash)) {
+	if (has_object_file(&obj_req->oid)) {
 		if (obj_req->req != NULL)
 			abort_http_object_request(obj_req->req);
 		abort_object_request(obj_req);
@@ -543,11 +543,11 @@
 	} else if (req->zret != Z_STREAM_END) {
 		walker->corrupt_object_found++;
 		ret = error("File %s (%s) corrupt", hex, req->url);
-	} else if (!hasheq(obj_req->oid.hash, req->real_sha1)) {
+	} else if (!oideq(&obj_req->oid, &req->real_oid)) {
 		ret = error("File %s has bad hash", hex);
 	} else if (req->rename < 0) {
 		struct strbuf buf = STRBUF_INIT;
-		loose_object_path(the_repository, &buf, req->sha1);
+		loose_object_path(the_repository, &buf, &req->oid);
 		ret = error("unable to write sha1 filename %s", buf.buf);
 		strbuf_release(&buf);
 	}
diff --git a/http.c b/http.c
index 954bebf..a32ad36 100644
--- a/http.c
+++ b/http.c
@@ -2343,9 +2343,9 @@
 }
 
 struct http_object_request *new_http_object_request(const char *base_url,
-	unsigned char *sha1)
+						    const struct object_id *oid)
 {
-	char *hex = sha1_to_hex(sha1);
+	char *hex = oid_to_hex(oid);
 	struct strbuf filename = STRBUF_INIT;
 	struct strbuf prevfile = STRBUF_INIT;
 	int prevlocal;
@@ -2356,10 +2356,10 @@
 
 	freq = xcalloc(1, sizeof(*freq));
 	strbuf_init(&freq->tmpfile, 0);
-	hashcpy(freq->sha1, sha1);
+	oidcpy(&freq->oid, oid);
 	freq->localfile = -1;
 
-	loose_object_path(the_repository, &filename, sha1);
+	loose_object_path(the_repository, &filename, oid);
 	strbuf_addf(&freq->tmpfile, "%s.temp", filename.buf);
 
 	strbuf_addf(&prevfile, "%s.prev", filename.buf);
@@ -2501,16 +2501,16 @@
 	}
 
 	git_inflate_end(&freq->stream);
-	git_SHA1_Final(freq->real_sha1, &freq->c);
+	git_SHA1_Final(freq->real_oid.hash, &freq->c);
 	if (freq->zret != Z_STREAM_END) {
 		unlink_or_warn(freq->tmpfile.buf);
 		return -1;
 	}
-	if (!hasheq(freq->sha1, freq->real_sha1)) {
+	if (!oideq(&freq->oid, &freq->real_oid)) {
 		unlink_or_warn(freq->tmpfile.buf);
 		return -1;
 	}
-	loose_object_path(the_repository, &filename, freq->sha1);
+	loose_object_path(the_repository, &filename, &freq->oid);
 	freq->rename = finalize_object_file(freq->tmpfile.buf, filename.buf);
 	strbuf_release(&filename);
 
diff --git a/http.h b/http.h
index eebf406..4eb4e80 100644
--- a/http.h
+++ b/http.h
@@ -223,8 +223,8 @@
 	CURLcode curl_result;
 	char errorstr[CURL_ERROR_SIZE];
 	long http_code;
-	unsigned char sha1[20];
-	unsigned char real_sha1[20];
+	struct object_id oid;
+	struct object_id real_oid;
 	git_SHA_CTX c;
 	git_zstream stream;
 	int zret;
@@ -233,7 +233,7 @@
 };
 
 extern struct http_object_request *new_http_object_request(
-	const char *base_url, unsigned char *sha1);
+	const char *base_url, const struct object_id *oid);
 extern void process_http_object_request(struct http_object_request *freq);
 extern int finish_http_object_request(struct http_object_request *freq);
 extern void abort_http_object_request(struct http_object_request *freq);
diff --git a/object-store.h b/object-store.h
index ba57630..14fc935 100644
--- a/object-store.h
+++ b/object-store.h
@@ -154,11 +154,13 @@
 
 /*
  * Put in `buf` the name of the file in the local object database that
- * would be used to store a loose object with the specified sha1.
+ * would be used to store a loose object with the specified oid.
  */
-const char *loose_object_path(struct repository *r, struct strbuf *buf, const unsigned char *sha1);
+const char *loose_object_path(struct repository *r, struct strbuf *buf,
+			      const struct object_id *oid);
 
-void *map_sha1_file(struct repository *r, const unsigned char *sha1, unsigned long *size);
+void *map_loose_object(struct repository *r, const struct object_id *oid,
+		       unsigned long *size);
 
 extern void *read_object_file_extended(struct repository *r,
 				       const struct object_id *oid,
@@ -206,19 +208,6 @@
 		      unsigned long *size,
 		      void **contents);
 
-/*
- * Convenience for sha1_object_info_extended() with a NULL struct
- * object_info. OBJECT_INFO_SKIP_CACHED is automatically set; pass
- * nonzero flags to also set other flags.
- */
-int repo_has_sha1_file_with_flags(struct repository *r,
-				  const unsigned char *sha1, int flags);
-static inline int repo_has_sha1_file(struct repository *r,
-				     const unsigned char *sha1)
-{
-	return repo_has_sha1_file_with_flags(r, sha1, 0);
-}
-
 #ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
 #define has_sha1_file_with_flags(sha1, flags) repo_has_sha1_file_with_flags(the_repository, sha1, flags)
 #define has_sha1_file(sha1) repo_has_sha1_file(the_repository, sha1)
diff --git a/object.c b/object.c
index 5dc5eec..59ea24c 100644
--- a/object.c
+++ b/object.c
@@ -264,7 +264,7 @@
 	    (!obj && repo_has_object_file(r, oid) &&
 	     oid_object_info(r, oid, NULL) == OBJ_BLOB)) {
 		if (check_object_signature(repl, NULL, 0, NULL) < 0) {
-			error(_("sha1 mismatch %s"), oid_to_hex(oid));
+			error(_("hash mismatch %s"), oid_to_hex(oid));
 			return NULL;
 		}
 		parse_blob_buffer(lookup_blob(r, oid), NULL, 0);
@@ -275,7 +275,7 @@
 	if (buffer) {
 		if (check_object_signature(repl, buffer, size, type_name(type)) < 0) {
 			free(buffer);
-			error(_("sha1 mismatch %s"), oid_to_hex(repl));
+			error(_("hash mismatch %s"), oid_to_hex(repl));
 			return NULL;
 		}
 
diff --git a/refs.c b/refs.c
index f993635..142888a 100644
--- a/refs.c
+++ b/refs.c
@@ -188,7 +188,7 @@
 {
 	if (flags & REF_ISBROKEN)
 		return 0;
-	if (!has_sha1_file(oid->hash)) {
+	if (!has_object_file(oid)) {
 		error(_("%s does not point to a valid object!"), refname);
 		return 0;
 	}
diff --git a/send-pack.c b/send-pack.c
index 7b9829f..6dc16c3 100644
--- a/send-pack.c
+++ b/send-pack.c
@@ -40,7 +40,7 @@
 
 static void feed_object(const struct object_id *oid, FILE *fh, int negative)
 {
-	if (negative && !has_sha1_file(oid->hash))
+	if (negative && !has_object_file(oid))
 		return;
 
 	if (negative)
diff --git a/sha1-file.c b/sha1-file.c
index c8da9f3..494606f 100644
--- a/sha1-file.c
+++ b/sha1-file.c
@@ -192,7 +192,7 @@
 
 /*
  * This is meant to hold a *small* number of objects that you would
- * want read_sha1_file() to be able to return, but yet you do not want
+ * want read_object_file() to be able to return, but yet you do not want
  * to write them into the object store (e.g. a browse-only
  * application).
  */
@@ -401,12 +401,12 @@
 	return ret;
 }
 
-static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1)
+static void fill_loose_path(struct strbuf *buf, const struct object_id *oid)
 {
 	int i;
 	for (i = 0; i < the_hash_algo->rawsz; i++) {
 		static char hex[] = "0123456789abcdef";
-		unsigned int val = sha1[i];
+		unsigned int val = oid->hash[i];
 		strbuf_addch(buf, hex[val >> 4]);
 		strbuf_addch(buf, hex[val & 0xf]);
 		if (!i)
@@ -416,19 +416,19 @@
 
 static const char *odb_loose_path(struct object_directory *odb,
 				  struct strbuf *buf,
-				  const unsigned char *sha1)
+				  const struct object_id *oid)
 {
 	strbuf_reset(buf);
 	strbuf_addstr(buf, odb->path);
 	strbuf_addch(buf, '/');
-	fill_sha1_path(buf, sha1);
+	fill_loose_path(buf, oid);
 	return buf->buf;
 }
 
 const char *loose_object_path(struct repository *r, struct strbuf *buf,
-			      const unsigned char *sha1)
+			      const struct object_id *oid)
 {
-	return odb_loose_path(r->objects->odb, buf, sha1);
+	return odb_loose_path(r->objects->odb, buf, oid);
 }
 
 /*
@@ -789,7 +789,7 @@
 				 int freshen)
 {
 	static struct strbuf path = STRBUF_INIT;
-	odb_loose_path(odb, &path, oid->hash);
+	odb_loose_path(odb, &path, oid);
 	return check_and_freshen_file(path.buf, freshen);
 }
 
@@ -866,8 +866,8 @@
 
 /*
  * With an in-core object data in "map", rehash it to make sure the
- * object name actually matches "sha1" to detect object corruption.
- * With "map" == NULL, try reading the object named with "sha1" using
+ * object name actually matches "oid" to detect object corruption.
+ * With "map" == NULL, try reading the object named with "oid" using
  * the streaming interface and rehash it to do the same.
  */
 int check_object_signature(const struct object_id *oid, void *map,
@@ -940,22 +940,22 @@
 }
 
 /*
- * Find "sha1" as a loose object in the local repository or in an alternate.
+ * Find "oid" as a loose object in the local repository or in an alternate.
  * Returns 0 on success, negative on failure.
  *
  * The "path" out-parameter will give the path of the object we found (if any).
  * Note that it may point to static storage and is only valid until another
- * call to stat_sha1_file().
+ * call to stat_loose_object().
  */
-static int stat_sha1_file(struct repository *r, const unsigned char *sha1,
-			  struct stat *st, const char **path)
+static int stat_loose_object(struct repository *r, const struct object_id *oid,
+			     struct stat *st, const char **path)
 {
 	struct object_directory *odb;
 	static struct strbuf buf = STRBUF_INIT;
 
 	prepare_alt_odb(r);
 	for (odb = r->objects->odb; odb; odb = odb->next) {
-		*path = odb_loose_path(odb, &buf, sha1);
+		*path = odb_loose_path(odb, &buf, oid);
 		if (!lstat(*path, st))
 			return 0;
 	}
@@ -964,11 +964,11 @@
 }
 
 /*
- * Like stat_sha1_file(), but actually open the object and return the
+ * Like stat_loose_object(), but actually open the object and return the
  * descriptor. See the caveats on the "path" parameter above.
  */
-static int open_sha1_file(struct repository *r,
-			  const unsigned char *sha1, const char **path)
+static int open_loose_object(struct repository *r,
+			     const struct object_id *oid, const char **path)
 {
 	int fd;
 	struct object_directory *odb;
@@ -977,7 +977,7 @@
 
 	prepare_alt_odb(r);
 	for (odb = r->objects->odb; odb; odb = odb->next) {
-		*path = odb_loose_path(odb, &buf, sha1);
+		*path = odb_loose_path(odb, &buf, oid);
 		fd = git_open(*path);
 		if (fd >= 0)
 			return fd;
@@ -990,16 +990,13 @@
 }
 
 static int quick_has_loose(struct repository *r,
-			   const unsigned char *sha1)
+			   const struct object_id *oid)
 {
-	struct object_id oid;
 	struct object_directory *odb;
 
-	hashcpy(oid.hash, sha1);
-
 	prepare_alt_odb(r);
 	for (odb = r->objects->odb; odb; odb = odb->next) {
-		if (oid_array_lookup(odb_loose_cache(odb, &oid), &oid) >= 0)
+		if (oid_array_lookup(odb_loose_cache(odb, oid), oid) >= 0)
 			return 1;
 	}
 	return 0;
@@ -1007,10 +1004,10 @@
 
 /*
  * Map the loose object at "path" if it is not NULL, or the path found by
- * searching for a loose object named "sha1".
+ * searching for a loose object named "oid".
  */
-static void *map_sha1_file_1(struct repository *r, const char *path,
-			     const unsigned char *sha1, unsigned long *size)
+static void *map_loose_object_1(struct repository *r, const char *path,
+			     const struct object_id *oid, unsigned long *size)
 {
 	void *map;
 	int fd;
@@ -1018,7 +1015,7 @@
 	if (path)
 		fd = git_open(path);
 	else
-		fd = open_sha1_file(r, sha1, &path);
+		fd = open_loose_object(r, oid, &path);
 	map = NULL;
 	if (fd >= 0) {
 		struct stat st;
@@ -1038,15 +1035,16 @@
 	return map;
 }
 
-void *map_sha1_file(struct repository *r,
-		    const unsigned char *sha1, unsigned long *size)
+void *map_loose_object(struct repository *r,
+		       const struct object_id *oid,
+		       unsigned long *size)
 {
-	return map_sha1_file_1(r, NULL, sha1, size);
+	return map_loose_object_1(r, NULL, oid, size);
 }
 
-static int unpack_sha1_short_header(git_zstream *stream,
-				    unsigned char *map, unsigned long mapsize,
-				    void *buffer, unsigned long bufsiz)
+static int unpack_loose_short_header(git_zstream *stream,
+				     unsigned char *map, unsigned long mapsize,
+				     void *buffer, unsigned long bufsiz)
 {
 	/* Get the data stream */
 	memset(stream, 0, sizeof(*stream));
@@ -1059,12 +1057,12 @@
 	return git_inflate(stream, 0);
 }
 
-int unpack_sha1_header(git_zstream *stream,
-		       unsigned char *map, unsigned long mapsize,
-		       void *buffer, unsigned long bufsiz)
+int unpack_loose_header(git_zstream *stream,
+			unsigned char *map, unsigned long mapsize,
+			void *buffer, unsigned long bufsiz)
 {
-	int status = unpack_sha1_short_header(stream, map, mapsize,
-					      buffer, bufsiz);
+	int status = unpack_loose_short_header(stream, map, mapsize,
+					       buffer, bufsiz);
 
 	if (status < Z_OK)
 		return status;
@@ -1075,13 +1073,13 @@
 	return 0;
 }
 
-static int unpack_sha1_header_to_strbuf(git_zstream *stream, unsigned char *map,
-					unsigned long mapsize, void *buffer,
-					unsigned long bufsiz, struct strbuf *header)
+static int unpack_loose_header_to_strbuf(git_zstream *stream, unsigned char *map,
+					 unsigned long mapsize, void *buffer,
+					 unsigned long bufsiz, struct strbuf *header)
 {
 	int status;
 
-	status = unpack_sha1_short_header(stream, map, mapsize, buffer, bufsiz);
+	status = unpack_loose_short_header(stream, map, mapsize, buffer, bufsiz);
 	if (status < Z_OK)
 		return -1;
 
@@ -1111,7 +1109,9 @@
 	return -1;
 }
 
-static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long size, const unsigned char *sha1)
+static void *unpack_loose_rest(git_zstream *stream,
+			       void *buffer, unsigned long size,
+			       const struct object_id *oid)
 {
 	int bytes = strlen(buffer) + 1;
 	unsigned char *buf = xmallocz(size);
@@ -1148,10 +1148,10 @@
 	}
 
 	if (status < 0)
-		error(_("corrupt loose object '%s'"), sha1_to_hex(sha1));
+		error(_("corrupt loose object '%s'"), oid_to_hex(oid));
 	else if (stream->avail_in)
 		error(_("garbage at end of loose object '%s'"),
-		      sha1_to_hex(sha1));
+		      oid_to_hex(oid));
 	free(buf);
 	return NULL;
 }
@@ -1161,8 +1161,8 @@
  * too permissive for what we want to check. So do an anal
  * object header parse by hand.
  */
-static int parse_sha1_header_extended(const char *hdr, struct object_info *oi,
-			       unsigned int flags)
+static int parse_loose_header_extended(const char *hdr, struct object_info *oi,
+				       unsigned int flags)
 {
 	const char *type_buf = hdr;
 	unsigned long size;
@@ -1222,17 +1222,17 @@
 	return *hdr ? -1 : type;
 }
 
-int parse_sha1_header(const char *hdr, unsigned long *sizep)
+int parse_loose_header(const char *hdr, unsigned long *sizep)
 {
 	struct object_info oi = OBJECT_INFO_INIT;
 
 	oi.sizep = sizep;
-	return parse_sha1_header_extended(hdr, &oi, 0);
+	return parse_loose_header_extended(hdr, &oi, 0);
 }
 
-static int sha1_loose_object_info(struct repository *r,
-				  const unsigned char *sha1,
-				  struct object_info *oi, int flags)
+static int loose_object_info(struct repository *r,
+			     const struct object_id *oid,
+			     struct object_info *oi, int flags)
 {
 	int status = 0;
 	unsigned long mapsize;
@@ -1257,15 +1257,15 @@
 		const char *path;
 		struct stat st;
 		if (!oi->disk_sizep && (flags & OBJECT_INFO_QUICK))
-			return quick_has_loose(r, sha1) ? 0 : -1;
-		if (stat_sha1_file(r, sha1, &st, &path) < 0)
+			return quick_has_loose(r, oid) ? 0 : -1;
+		if (stat_loose_object(r, oid, &st, &path) < 0)
 			return -1;
 		if (oi->disk_sizep)
 			*oi->disk_sizep = st.st_size;
 		return 0;
 	}
 
-	map = map_sha1_file(r, sha1, &mapsize);
+	map = map_loose_object(r, oid, &mapsize);
 	if (!map)
 		return -1;
 
@@ -1275,24 +1275,24 @@
 	if (oi->disk_sizep)
 		*oi->disk_sizep = mapsize;
 	if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE)) {
-		if (unpack_sha1_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
+		if (unpack_loose_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
 			status = error(_("unable to unpack %s header with --allow-unknown-type"),
-				       sha1_to_hex(sha1));
-	} else if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
+				       oid_to_hex(oid));
+	} else if (unpack_loose_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
 		status = error(_("unable to unpack %s header"),
-			       sha1_to_hex(sha1));
+			       oid_to_hex(oid));
 	if (status < 0)
 		; /* Do nothing */
 	else if (hdrbuf.len) {
-		if ((status = parse_sha1_header_extended(hdrbuf.buf, oi, flags)) < 0)
+		if ((status = parse_loose_header_extended(hdrbuf.buf, oi, flags)) < 0)
 			status = error(_("unable to parse %s header with --allow-unknown-type"),
-				       sha1_to_hex(sha1));
-	} else if ((status = parse_sha1_header_extended(hdr, oi, flags)) < 0)
-		status = error(_("unable to parse %s header"), sha1_to_hex(sha1));
+				       oid_to_hex(oid));
+	} else if ((status = parse_loose_header_extended(hdr, oi, flags)) < 0)
+		status = error(_("unable to parse %s header"), oid_to_hex(oid));
 
 	if (status >= 0 && oi->contentp) {
-		*oi->contentp = unpack_sha1_rest(&stream, hdr,
-						 *oi->sizep, sha1);
+		*oi->contentp = unpack_loose_rest(&stream, hdr,
+						  *oi->sizep, oid);
 		if (!*oi->contentp) {
 			git_inflate_end(&stream);
 			status = -1;
@@ -1358,7 +1358,7 @@
 			return -1;
 
 		/* Most likely it's a loose object. */
-		if (!sha1_loose_object_info(r, real->hash, oi, flags))
+		if (!loose_object_info(r, real, oi, flags))
 			return 0;
 
 		/* Not a loose object; someone else may have just packed it. */
@@ -1422,20 +1422,16 @@
 }
 
 static void *read_object(struct repository *r,
-			 const unsigned char *sha1,
-			 enum object_type *type,
+			 const struct object_id *oid, enum object_type *type,
 			 unsigned long *size)
 {
-	struct object_id oid;
 	struct object_info oi = OBJECT_INFO_INIT;
 	void *content;
 	oi.typep = type;
 	oi.sizep = size;
 	oi.contentp = &content;
 
-	hashcpy(oid.hash, sha1);
-
-	if (oid_object_info_extended(r, &oid, &oi, 0) < 0)
+	if (oid_object_info_extended(r, oid, &oi, 0) < 0)
 		return NULL;
 	return content;
 }
@@ -1446,7 +1442,7 @@
 	struct cached_object *co;
 
 	hash_object_file(buf, len, type_name(type), oid);
-	if (has_sha1_file(oid->hash) || find_cached_object(oid))
+	if (has_object_file(oid) || find_cached_object(oid))
 		return 0;
 	ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
 	co = &cached_objects[cached_object_nr++];
@@ -1477,7 +1473,7 @@
 		lookup_replace_object(r, oid) : oid;
 
 	errno = 0;
-	data = read_object(r, repl->hash, type, size);
+	data = read_object(r, repl, type, size);
 	if (data)
 		return data;
 
@@ -1489,7 +1485,7 @@
 		die(_("replacement %s not found for %s"),
 		    oid_to_hex(repl), oid_to_hex(oid));
 
-	if (!stat_sha1_file(r, repl->hash, &st, &path))
+	if (!stat_loose_object(r, repl, &st, &path))
 		die(_("loose object %s (stored in %s) is corrupt"),
 		    oid_to_hex(repl), path);
 
@@ -1596,7 +1592,7 @@
 	unlink_or_warn(tmpfile);
 	if (ret) {
 		if (ret != EEXIST) {
-			return error_errno(_("unable to write sha1 filename %s"), filename);
+			return error_errno(_("unable to write file %s"), filename);
 		}
 		/* FIXME!!! Collision check here ? */
 	}
@@ -1624,12 +1620,12 @@
 }
 
 /* Finalize a file on disk, and close it. */
-static void close_sha1_file(int fd)
+static void close_loose_object(int fd)
 {
 	if (fsync_object_files)
-		fsync_or_die(fd, "sha1 file");
+		fsync_or_die(fd, "loose object file");
 	if (close(fd) != 0)
-		die_errno(_("error when closing sha1 file"));
+		die_errno(_("error when closing loose object file"));
 }
 
 /* Size of directory component, including the ending '/' */
@@ -1689,7 +1685,7 @@
 	static struct strbuf tmp_file = STRBUF_INIT;
 	static struct strbuf filename = STRBUF_INIT;
 
-	loose_object_path(the_repository, &filename, oid->hash);
+	loose_object_path(the_repository, &filename, oid);
 
 	fd = create_tmpfile(&tmp_file, filename.buf);
 	if (fd < 0) {
@@ -1720,7 +1716,7 @@
 		ret = git_deflate(&stream, Z_FINISH);
 		the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
 		if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
-			die(_("unable to write sha1 file"));
+			die(_("unable to write loose object file"));
 		stream.next_out = compressed;
 		stream.avail_out = sizeof(compressed);
 	} while (ret == Z_OK);
@@ -1737,7 +1733,7 @@
 		die(_("confused by unstable object source data for %s"),
 		    oid_to_hex(oid));
 
-	close_sha1_file(fd);
+	close_loose_object(fd);
 
 	if (mtime) {
 		struct utimbuf utb;
@@ -1817,9 +1813,9 @@
 
 	if (has_loose_object(oid))
 		return 0;
-	buf = read_object(the_repository, oid->hash, &type, &len);
+	buf = read_object(the_repository, oid, &type, &len);
 	if (!buf)
-		return error(_("cannot read sha1_file for %s"), oid_to_hex(oid));
+		return error(_("cannot read object for %s"), oid_to_hex(oid));
 	hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(type), (uintmax_t)len) + 1;
 	ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
 	free(buf);
@@ -1827,27 +1823,19 @@
 	return ret;
 }
 
-int repo_has_sha1_file_with_flags(struct repository *r,
-				  const unsigned char *sha1, int flags)
+int repo_has_object_file_with_flags(struct repository *r,
+				    const struct object_id *oid, int flags)
 {
-	struct object_id oid;
 	if (!startup_info->have_repository)
 		return 0;
-	hashcpy(oid.hash, sha1);
-	return oid_object_info_extended(r, &oid, NULL,
+	return oid_object_info_extended(r, oid, NULL,
 					flags | OBJECT_INFO_SKIP_CACHED) >= 0;
 }
 
 int repo_has_object_file(struct repository *r,
 			 const struct object_id *oid)
 {
-	return repo_has_sha1_file(r, oid->hash);
-}
-
-int repo_has_object_file_with_flags(struct repository *r,
-				    const struct object_id *oid, int flags)
-{
-	return repo_has_sha1_file_with_flags(r, oid->hash, flags);
+	return repo_has_object_file_with_flags(r, oid, 0);
 }
 
 static void check_tree(const void *buf, size_t size)
@@ -2258,14 +2246,14 @@
 	       sizeof(odb->loose_objects_subdir_seen));
 }
 
-static int check_stream_sha1(git_zstream *stream,
-			     const char *hdr,
-			     unsigned long size,
-			     const char *path,
-			     const unsigned char *expected_sha1)
+static int check_stream_oid(git_zstream *stream,
+			    const char *hdr,
+			    unsigned long size,
+			    const char *path,
+			    const struct object_id *expected_oid)
 {
 	git_hash_ctx c;
-	unsigned char real_sha1[GIT_MAX_RAWSZ];
+	struct object_id real_oid;
 	unsigned char buf[4096];
 	unsigned long total_read;
 	int status = Z_OK;
@@ -2281,7 +2269,7 @@
 
 	/*
 	 * This size comparison must be "<=" to read the final zlib packets;
-	 * see the comment in unpack_sha1_rest for details.
+	 * see the comment in unpack_loose_rest for details.
 	 */
 	while (total_read <= size &&
 	       (status == Z_OK ||
@@ -2297,19 +2285,19 @@
 	git_inflate_end(stream);
 
 	if (status != Z_STREAM_END) {
-		error(_("corrupt loose object '%s'"), sha1_to_hex(expected_sha1));
+		error(_("corrupt loose object '%s'"), oid_to_hex(expected_oid));
 		return -1;
 	}
 	if (stream->avail_in) {
 		error(_("garbage at end of loose object '%s'"),
-		      sha1_to_hex(expected_sha1));
+		      oid_to_hex(expected_oid));
 		return -1;
 	}
 
-	the_hash_algo->final_fn(real_sha1, &c);
-	if (!hasheq(expected_sha1, real_sha1)) {
-		error(_("sha1 mismatch for %s (expected %s)"), path,
-		      sha1_to_hex(expected_sha1));
+	the_hash_algo->final_fn(real_oid.hash, &c);
+	if (!oideq(expected_oid, &real_oid)) {
+		error(_("hash mismatch for %s (expected %s)"), path,
+		      oid_to_hex(expected_oid));
 		return -1;
 	}
 
@@ -2330,18 +2318,18 @@
 
 	*contents = NULL;
 
-	map = map_sha1_file_1(the_repository, path, NULL, &mapsize);
+	map = map_loose_object_1(the_repository, path, NULL, &mapsize);
 	if (!map) {
 		error_errno(_("unable to mmap %s"), path);
 		goto out;
 	}
 
-	if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
+	if (unpack_loose_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
 		error(_("unable to unpack header of %s"), path);
 		goto out;
 	}
 
-	*type = parse_sha1_header(hdr, size);
+	*type = parse_loose_header(hdr, size);
 	if (*type < 0) {
 		error(_("unable to parse header of %s"), path);
 		git_inflate_end(&stream);
@@ -2349,10 +2337,10 @@
 	}
 
 	if (*type == OBJ_BLOB && *size > big_file_threshold) {
-		if (check_stream_sha1(&stream, hdr, *size, path, expected_oid->hash) < 0)
+		if (check_stream_oid(&stream, hdr, *size, path, expected_oid) < 0)
 			goto out;
 	} else {
-		*contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
+		*contents = unpack_loose_rest(&stream, hdr, *size, expected_oid);
 		if (!*contents) {
 			error(_("unable to unpack contents of %s"), path);
 			git_inflate_end(&stream);
@@ -2360,7 +2348,7 @@
 		}
 		if (check_object_signature(expected_oid, *contents,
 					 *size, type_name(*type))) {
-			error(_("sha1 mismatch for %s (expected %s)"), path,
+			error(_("hash mismatch for %s (expected %s)"), path,
 			      oid_to_hex(expected_oid));
 			free(*contents);
 			goto out;
diff --git a/streaming.c b/streaming.c
index 891f82f..fcd6303 100644
--- a/streaming.c
+++ b/streaming.c
@@ -338,16 +338,16 @@
 
 static open_method_decl(loose)
 {
-	st->u.loose.mapped = map_sha1_file(the_repository,
-					   oid->hash, &st->u.loose.mapsize);
+	st->u.loose.mapped = map_loose_object(the_repository,
+					      oid, &st->u.loose.mapsize);
 	if (!st->u.loose.mapped)
 		return -1;
-	if ((unpack_sha1_header(&st->z,
-				st->u.loose.mapped,
-				st->u.loose.mapsize,
-				st->u.loose.hdr,
-				sizeof(st->u.loose.hdr)) < 0) ||
-	    (parse_sha1_header(st->u.loose.hdr, &st->size) < 0)) {
+	if ((unpack_loose_header(&st->z,
+				 st->u.loose.mapped,
+				 st->u.loose.mapsize,
+				 st->u.loose.hdr,
+				 sizeof(st->u.loose.hdr)) < 0) ||
+	    (parse_loose_header(st->u.loose.hdr, &st->size) < 0)) {
 		git_inflate_end(&st->z);
 		munmap(st->u.loose.mapped, st->u.loose.mapsize);
 		return -1;
diff --git a/t/t1450-fsck.sh b/t/t1450-fsck.sh
index 2e5e979..c61f972 100755
--- a/t/t1450-fsck.sh
+++ b/t/t1450-fsck.sh
@@ -406,7 +406,7 @@
 
 	test_might_fail git rev-list --verify-objects refs/heads/bogus >/dev/null 2>out &&
 	cat out &&
-	test_i18ngrep -q "error: sha1 mismatch 63ffffffffffffffffffffffffffffffffffffff" out
+	test_i18ngrep -q "error: hash mismatch 63ffffffffffffffffffffffffffffffffffffff" out
 '
 
 test_expect_success 'force fsck to ignore double author' '