# HG changeset patch
# User Mads Kiilerich
# Date 2013-01-28 14:19:44
# Node ID ce5f529deb36b093c5fa35c70ba336b4ce04d07c
# Parent  6f219eb83435396f1a87a3c3d22908d0459cc3dd
largefiles: don't allow corruption to propagate after detection

basestore.get uses util.atomictempfile when checking and receiving a new
largefile ... but the close/discard logic was too clever for largefiles.
Largefiles relied on being able to discard the file and thus prevent it from
being written to the store. That was however too brittle. lfutil.copyandhash
closes the infile after writing to it ... with a 'blecch' comment. The discard
was thus a silent noop, and as a result of that corruption would be detected
... and then the corrupted files would be used anyway.

Instead we now use a tmp file and rename or unlink it after validating it.

A better solution should be implemented ... but not now.

diff --git a/hgext/largefiles/basestore.py b/hgext/largefiles/basestore.py
--- a/hgext/largefiles/basestore.py
+++ b/hgext/largefiles/basestore.py
@@ -67,7 +67,7 @@ class basestore(object):
             ui.note(_('getting %s:%s\n') % (filename, hash))
 
             storefilename = lfutil.storepath(self.repo, hash)
-            tmpfile = util.atomictempfile(storefilename,
+            tmpfile = util.atomictempfile(storefilename + '.tmp',
                                           createmode=self.repo.store.createmode)
 
             try:
@@ -75,16 +75,17 @@ class basestore(object):
             except StoreError, err:
                 ui.warn(err.longmessage())
                 hhash = ""
+            tmpfile.close() # has probably already been closed!
 
             if hhash != hash:
                 if hhash != "":
                     ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                             % (filename, hash, hhash))
-                tmpfile.discard() # no-op if it's already closed
+                util.unlink(storefilename + '.tmp')
                 missing.append(filename)
                 continue
 
-            tmpfile.close()
+            util.rename(storefilename + '.tmp', storefilename)
             lfutil.linktousercache(self.repo, hash)
             success.append((filename, hhash))
 
diff --git a/tests/test-largefiles.t b/tests/test-largefiles.t
--- a/tests/test-largefiles.t
+++ b/tests/test-largefiles.t
@@ -1584,14 +1584,12 @@ largefiles pulled on update - a largefil
   $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache
   getting changed largefiles
   f1: data corruption (expected 02a439e5c31c526465ab1a0ca1f431f76b827b90, got 6a7bb2556144babe3899b25e5428123735bb1e27)
-  1 largefiles updated, 0 removed
+  0 largefiles updated, 0 removed
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg -R http-clone st
-  M f1
-  $ cat http-clone/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90
-  corruption
-  $ cat http-clone/f1
-  corruption
+  ! f1
+  $ [ ! -f http-clone/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
+  $ [ ! -f http-clone/f1 ]
   $ [ ! -f http-clone-usercache ]
   $ hg -R http-clone verify --large --lfc
   checking changesets
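
For reference, the pattern the patch switches to can be sketched as a small
standalone Python function: write the download to a separate '.tmp' file,
verify the content hash, and only then rename it into the store path (or
unlink it on mismatch, so a corrupt copy never reaches the store). The helper
name below is hypothetical and the plain os.rename/os.unlink calls are
illustrative only; the actual change uses Mercurial's util.rename and
util.unlink wrappers, which add platform-specific handling.

    import hashlib
    import os

    def verify_and_install(tmpname, storename, expected_hash):
        # Hash the fully written temp file; largefiles are addressed by
        # their SHA-1, so a mismatch here means the download is corrupt.
        sha = hashlib.sha1()
        with open(tmpname, 'rb') as fp:
            for chunk in iter(lambda: fp.read(131072), b''):
                sha.update(chunk)

        if sha.hexdigest() != expected_hash:
            # Corruption detected: drop the temp file instead of letting
            # anything appear under the store filename.
            os.unlink(tmpname)
            return False

        # Verified: move the temp file onto the real store path.
        # (The patch itself uses util.rename/util.unlink for portability.)
        os.rename(tmpname, storename)
        return True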