largefiles: respect the rev when reading standins in copytostore() (issue3630)...
Matt Harbison
r17877:92bbb21d stable
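For context: copyalltostore() walks the standins of a specific changeset and calls copytostore() with that changeset's node, but copytostore() previously read the standin hash with readstandin(repo, file), which defaults to the working directory. The one-line change below passes rev through so the hash comes from the requested revision. A minimal, self-contained sketch of the difference follows; FakeRepo-style dicts are hypothetical stand-ins for illustration only, not Mercurial's real repo objects.

    # Sketch of the issue3630 fix; the dicts below are hypothetical stand-ins,
    # not Mercurial's API.

    def readstandin(repo, filename, node=None):
        # Mirrors lfutil.readstandin(): node=None reads the standin from the
        # working directory instead of a specific changeset.
        return repo[node]['.hglf/' + filename]

    def copytostore_before(repo, rev, file):
        return readstandin(repo, file)        # bug: rev is ignored

    def copytostore_after(repo, rev, file):
        return readstandin(repo, file, rev)   # fix: hash comes from rev

    # Working directory and revision 'r1' record different largefile hashes.
    repo = {
        None: {'.hglf/big.dat': 'hash-of-working-copy'},
        'r1': {'.hglf/big.dat': 'hash-recorded-in-r1'},
    }
    assert copytostore_before(repo, 'r1', 'big.dat') == 'hash-of-working-copy'
    assert copytostore_after(repo, 'r1', 'big.dat') == 'hash-recorded-in-r1'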
hgext/largefiles/lfutil.py
@@ -1,469 +1,469 @@
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import errno
13 13 import platform
14 14 import shutil
15 15 import stat
16 16
17 17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 18 from mercurial.i18n import _
19 19
20 20 shortname = '.hglf'
21 21 longname = 'largefiles'
22 22
23 23
24 24 # -- Portability wrappers ----------------------------------------------
25 25
26 26 def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
27 27 return dirstate.walk(matcher, [], unknown, ignored)
28 28
29 29 def repoadd(repo, list):
30 30 add = repo[None].add
31 31 return add(list)
32 32
33 33 def reporemove(repo, list, unlink=False):
34 34 def remove(list, unlink):
35 35 wlock = repo.wlock()
36 36 try:
37 37 if unlink:
38 38 for f in list:
39 39 try:
40 40 util.unlinkpath(repo.wjoin(f))
41 41 except OSError, inst:
42 42 if inst.errno != errno.ENOENT:
43 43 raise
44 44 repo[None].forget(list)
45 45 finally:
46 46 wlock.release()
47 47 return remove(list, unlink=unlink)
48 48
49 49 def repoforget(repo, list):
50 50 forget = repo[None].forget
51 51 return forget(list)
52 52
53 53 def findoutgoing(repo, remote, force):
54 54 from mercurial import discovery
55 55 outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=force)
56 56 return outgoing.missing
57 57
58 58 # -- Private worker functions ------------------------------------------
59 59
60 60 def getminsize(ui, assumelfiles, opt, default=10):
61 61 lfsize = opt
62 62 if not lfsize and assumelfiles:
63 63 lfsize = ui.config(longname, 'minsize', default=default)
64 64 if lfsize:
65 65 try:
66 66 lfsize = float(lfsize)
67 67 except ValueError:
68 68 raise util.Abort(_('largefiles: size must be number (not %s)\n')
69 69 % lfsize)
70 70 if lfsize is None:
71 71 raise util.Abort(_('minimum size for largefiles must be specified'))
72 72 return lfsize
73 73
74 74 def link(src, dest):
75 75 try:
76 76 util.oslink(src, dest)
77 77 except OSError:
78 78 # if hardlinks fail, fallback on atomic copy
79 79 dst = util.atomictempfile(dest)
80 80 for chunk in util.filechunkiter(open(src, 'rb')):
81 81 dst.write(chunk)
82 82 dst.close()
83 83 os.chmod(dest, os.stat(src).st_mode)
84 84
85 85 def usercachepath(ui, hash):
86 86 path = ui.configpath(longname, 'usercache', None)
87 87 if path:
88 88 path = os.path.join(path, hash)
89 89 else:
90 90 if os.name == 'nt':
91 91 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
92 92 if appdata:
93 93 path = os.path.join(appdata, longname, hash)
94 94 elif platform.system() == 'Darwin':
95 95 home = os.getenv('HOME')
96 96 if home:
97 97 path = os.path.join(home, 'Library', 'Caches',
98 98 longname, hash)
99 99 elif os.name == 'posix':
100 100 path = os.getenv('XDG_CACHE_HOME')
101 101 if path:
102 102 path = os.path.join(path, longname, hash)
103 103 else:
104 104 home = os.getenv('HOME')
105 105 if home:
106 106 path = os.path.join(home, '.cache', longname, hash)
107 107 else:
108 108 raise util.Abort(_('unknown operating system: %s\n') % os.name)
109 109 return path
110 110
111 111 def inusercache(ui, hash):
112 112 path = usercachepath(ui, hash)
113 113 return path and os.path.exists(path)
114 114
115 115 def findfile(repo, hash):
116 116 if instore(repo, hash):
117 117 repo.ui.note(_('found %s in store\n') % hash)
118 118 return storepath(repo, hash)
119 119 elif inusercache(repo.ui, hash):
120 120 repo.ui.note(_('found %s in system cache\n') % hash)
121 121 path = storepath(repo, hash)
122 122 util.makedirs(os.path.dirname(path))
123 123 link(usercachepath(repo.ui, hash), path)
124 124 return path
125 125 return None
126 126
127 127 class largefilesdirstate(dirstate.dirstate):
128 128 def __getitem__(self, key):
129 129 return super(largefilesdirstate, self).__getitem__(unixpath(key))
130 130 def normal(self, f):
131 131 return super(largefilesdirstate, self).normal(unixpath(f))
132 132 def remove(self, f):
133 133 return super(largefilesdirstate, self).remove(unixpath(f))
134 134 def add(self, f):
135 135 return super(largefilesdirstate, self).add(unixpath(f))
136 136 def drop(self, f):
137 137 return super(largefilesdirstate, self).drop(unixpath(f))
138 138 def forget(self, f):
139 139 return super(largefilesdirstate, self).forget(unixpath(f))
140 140 def normallookup(self, f):
141 141 return super(largefilesdirstate, self).normallookup(unixpath(f))
142 142
143 143 def openlfdirstate(ui, repo, create=True):
144 144 '''
145 145 Return a dirstate object that tracks largefiles: i.e. its root is
146 146 the repo root, but it is saved in .hg/largefiles/dirstate.
147 147 '''
148 148 admin = repo.join(longname)
149 149 opener = scmutil.opener(admin)
150 150 lfdirstate = largefilesdirstate(opener, ui, repo.root,
151 151 repo.dirstate._validate)
152 152
153 153 # If the largefiles dirstate does not exist, populate and create
154 154 # it. This ensures that we create it on the first meaningful
155 155 # largefiles operation in a new clone.
156 156 if create and not os.path.exists(os.path.join(admin, 'dirstate')):
157 157 util.makedirs(admin)
158 158 matcher = getstandinmatcher(repo)
159 159 for standin in dirstatewalk(repo.dirstate, matcher):
160 160 lfile = splitstandin(standin)
161 161 hash = readstandin(repo, lfile)
162 162 lfdirstate.normallookup(lfile)
163 163 try:
164 164 if hash == hashfile(repo.wjoin(lfile)):
165 165 lfdirstate.normal(lfile)
166 166 except OSError, err:
167 167 if err.errno != errno.ENOENT:
168 168 raise
169 169 return lfdirstate
170 170
171 171 def lfdirstatestatus(lfdirstate, repo, rev):
172 172 match = match_.always(repo.root, repo.getcwd())
173 173 s = lfdirstate.status(match, [], False, False, False)
174 174 unsure, modified, added, removed, missing, unknown, ignored, clean = s
175 175 for lfile in unsure:
176 176 if repo[rev][standin(lfile)].data().strip() != \
177 177 hashfile(repo.wjoin(lfile)):
178 178 modified.append(lfile)
179 179 else:
180 180 clean.append(lfile)
181 181 lfdirstate.normal(lfile)
182 182 return (modified, added, removed, missing, unknown, ignored, clean)
183 183
184 184 def listlfiles(repo, rev=None, matcher=None):
185 185 '''return a list of largefiles in the working copy or the
186 186 specified changeset'''
187 187
188 188 if matcher is None:
189 189 matcher = getstandinmatcher(repo)
190 190
191 191 # ignore unknown files in working directory
192 192 return [splitstandin(f)
193 193 for f in repo[rev].walk(matcher)
194 194 if rev is not None or repo.dirstate[f] != '?']
195 195
196 196 def instore(repo, hash):
197 197 return os.path.exists(storepath(repo, hash))
198 198
199 199 def storepath(repo, hash):
200 200 return repo.join(os.path.join(longname, hash))
201 201
202 202 def copyfromcache(repo, hash, filename):
203 203 '''Copy the specified largefile from the repo or system cache to
204 204 filename in the repository. Return true on success or false if the
205 205 file was not found in either cache (which should not happened:
206 206 this is meant to be called only after ensuring that the needed
207 207 largefile exists in the cache).'''
208 208 path = findfile(repo, hash)
209 209 if path is None:
210 210 return False
211 211 util.makedirs(os.path.dirname(repo.wjoin(filename)))
212 212 # The write may fail before the file is fully written, but we
213 213 # don't use atomic writes in the working copy.
214 214 shutil.copy(path, repo.wjoin(filename))
215 215 return True
216 216
217 217 def copytostore(repo, rev, file, uploaded=False):
218 hash = readstandin(repo, file)
218 hash = readstandin(repo, file, rev)
219 219 if instore(repo, hash):
220 220 return
221 221 copytostoreabsolute(repo, repo.wjoin(file), hash)
222 222
223 223 def copyalltostore(repo, node):
224 224 '''Copy all largefiles in a given revision to the store'''
225 225
226 226 ctx = repo[node]
227 227 for filename in ctx.files():
228 228 if isstandin(filename) and filename in ctx.manifest():
229 229 realfile = splitstandin(filename)
230 230 copytostore(repo, ctx.node(), realfile)
231 231
232 232
233 233 def copytostoreabsolute(repo, file, hash):
234 234 util.makedirs(os.path.dirname(storepath(repo, hash)))
235 235 if inusercache(repo.ui, hash):
236 236 link(usercachepath(repo.ui, hash), storepath(repo, hash))
237 237 else:
238 238 dst = util.atomictempfile(storepath(repo, hash),
239 239 createmode=repo.store.createmode)
240 240 for chunk in util.filechunkiter(open(file, 'rb')):
241 241 dst.write(chunk)
242 242 dst.close()
243 243 linktousercache(repo, hash)
244 244
245 245 def linktousercache(repo, hash):
246 246 path = usercachepath(repo.ui, hash)
247 247 if path:
248 248 util.makedirs(os.path.dirname(path))
249 249 link(storepath(repo, hash), path)
250 250
251 251 def getstandinmatcher(repo, pats=[], opts={}):
252 252 '''Return a match object that applies pats to the standin directory'''
253 253 standindir = repo.pathto(shortname)
254 254 if pats:
255 255 # patterns supplied: search standin directory relative to current dir
256 256 cwd = repo.getcwd()
257 257 if os.path.isabs(cwd):
258 258 # cwd is an absolute path for hg -R <reponame>
259 259 # work relative to the repository root in this case
260 260 cwd = ''
261 261 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
262 262 elif os.path.isdir(standindir):
263 263 # no patterns: relative to repo root
264 264 pats = [standindir]
265 265 else:
266 266 # no patterns and no standin dir: return matcher that matches nothing
267 267 match = match_.match(repo.root, None, [], exact=True)
268 268 match.matchfn = lambda f: False
269 269 return match
270 270 return getmatcher(repo, pats, opts, showbad=False)
271 271
272 272 def getmatcher(repo, pats=[], opts={}, showbad=True):
273 273 '''Wrapper around scmutil.match() that adds showbad: if false,
274 274 neuter the match object's bad() method so it does not print any
275 275 warnings about missing files or directories.'''
276 276 match = scmutil.match(repo[None], pats, opts)
277 277
278 278 if not showbad:
279 279 match.bad = lambda f, msg: None
280 280 return match
281 281
282 282 def composestandinmatcher(repo, rmatcher):
283 283 '''Return a matcher that accepts standins corresponding to the
284 284 files accepted by rmatcher. Pass the list of files in the matcher
285 285 as the paths specified by the user.'''
286 286 smatcher = getstandinmatcher(repo, rmatcher.files())
287 287 isstandin = smatcher.matchfn
288 288 def composedmatchfn(f):
289 289 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
290 290 smatcher.matchfn = composedmatchfn
291 291
292 292 return smatcher
293 293
294 294 def standin(filename):
295 295 '''Return the repo-relative path to the standin for the specified big
296 296 file.'''
297 297 # Notes:
298 298 # 1) Some callers want an absolute path, but for instance addlargefiles
299 299 # needs it repo-relative so it can be passed to repoadd(). So leave
300 300 # it up to the caller to use repo.wjoin() to get an absolute path.
301 301 # 2) Join with '/' because that's what dirstate always uses, even on
302 302 # Windows. Change existing separator to '/' first in case we are
303 303 # passed filenames from an external source (like the command line).
304 304 return shortname + '/' + util.pconvert(filename)
305 305
306 306 def isstandin(filename):
307 307 '''Return true if filename is a big file standin. filename must be
308 308 in Mercurial's internal form (slash-separated).'''
309 309 return filename.startswith(shortname + '/')
310 310
311 311 def splitstandin(filename):
312 312 # Split on / because that's what dirstate always uses, even on Windows.
313 313 # Change local separator to / first just in case we are passed filenames
314 314 # from an external source (like the command line).
315 315 bits = util.pconvert(filename).split('/', 1)
316 316 if len(bits) == 2 and bits[0] == shortname:
317 317 return bits[1]
318 318 else:
319 319 return None
320 320
321 321 def updatestandin(repo, standin):
322 322 file = repo.wjoin(splitstandin(standin))
323 323 if os.path.exists(file):
324 324 hash = hashfile(file)
325 325 executable = getexecutable(file)
326 326 writestandin(repo, standin, hash, executable)
327 327
328 328 def readstandin(repo, filename, node=None):
329 329 '''read hex hash from standin for filename at given node, or working
330 330 directory if no node is given'''
331 331 return repo[node][standin(filename)].data().strip()
332 332
333 333 def writestandin(repo, standin, hash, executable):
334 334 '''write hash to <repo.root>/<standin>'''
335 335 writehash(hash, repo.wjoin(standin), executable)
336 336
337 337 def copyandhash(instream, outfile):
338 338 '''Read bytes from instream (iterable) and write them to outfile,
339 339 computing the SHA-1 hash of the data along the way. Close outfile
340 340 when done and return the binary hash.'''
341 341 hasher = util.sha1('')
342 342 for data in instream:
343 343 hasher.update(data)
344 344 outfile.write(data)
345 345
346 346 # Blecch: closing a file that somebody else opened is rude and
347 347 # wrong. But it's so darn convenient and practical! After all,
348 348 # outfile was opened just to copy and hash.
349 349 outfile.close()
350 350
351 351 return hasher.digest()
352 352
353 353 def hashrepofile(repo, file):
354 354 return hashfile(repo.wjoin(file))
355 355
356 356 def hashfile(file):
357 357 if not os.path.exists(file):
358 358 return ''
359 359 hasher = util.sha1('')
360 360 fd = open(file, 'rb')
361 361 for data in blockstream(fd):
362 362 hasher.update(data)
363 363 fd.close()
364 364 return hasher.hexdigest()
365 365
366 366 class limitreader(object):
367 367 def __init__(self, f, limit):
368 368 self.f = f
369 369 self.limit = limit
370 370
371 371 def read(self, length):
372 372 if self.limit == 0:
373 373 return ''
374 374 length = length > self.limit and self.limit or length
375 375 self.limit -= length
376 376 return self.f.read(length)
377 377
378 378 def close(self):
379 379 pass
380 380
381 381 def blockstream(infile, blocksize=128 * 1024):
382 382 """Generator that yields blocks of data from infile and closes infile."""
383 383 while True:
384 384 data = infile.read(blocksize)
385 385 if not data:
386 386 break
387 387 yield data
388 388 # same blecch as copyandhash() above
389 389 infile.close()
390 390
391 391 def writehash(hash, filename, executable):
392 392 util.makedirs(os.path.dirname(filename))
393 393 util.writefile(filename, hash + '\n')
394 394 os.chmod(filename, getmode(executable))
395 395
396 396 def getexecutable(filename):
397 397 mode = os.stat(filename).st_mode
398 398 return ((mode & stat.S_IXUSR) and
399 399 (mode & stat.S_IXGRP) and
400 400 (mode & stat.S_IXOTH))
401 401
402 402 def getmode(executable):
403 403 if executable:
404 404 return 0755
405 405 else:
406 406 return 0644
407 407
408 408 def urljoin(first, second, *arg):
409 409 def join(left, right):
410 410 if not left.endswith('/'):
411 411 left += '/'
412 412 if right.startswith('/'):
413 413 right = right[1:]
414 414 return left + right
415 415
416 416 url = join(first, second)
417 417 for a in arg:
418 418 url = join(url, a)
419 419 return url
420 420
421 421 def hexsha1(data):
422 422 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
423 423 object data"""
424 424 h = util.sha1()
425 425 for chunk in util.filechunkiter(data):
426 426 h.update(chunk)
427 427 return h.hexdigest()
428 428
429 429 def httpsendfile(ui, filename):
430 430 return httpconnection.httpsendfile(ui, filename, 'rb')
431 431
432 432 def unixpath(path):
433 433 '''Return a version of path normalized for use with the lfdirstate.'''
434 434 return util.pconvert(os.path.normpath(path))
435 435
436 436 def islfilesrepo(repo):
437 437 if ('largefiles' in repo.requirements and
438 438 util.any(shortname + '/' in f[0] for f in repo.store.datafiles())):
439 439 return True
440 440
441 441 return util.any(openlfdirstate(repo.ui, repo, False))
442 442
443 443 class storeprotonotcapable(Exception):
444 444 def __init__(self, storetypes):
445 445 self.storetypes = storetypes
446 446
447 447 def getcurrentheads(repo):
448 448 branches = repo.branchmap()
449 449 heads = []
450 450 for branch in branches:
451 451 newheads = repo.branchheads(branch)
452 452 heads = heads + newheads
453 453 return heads
454 454
455 455 def getstandinsstate(repo):
456 456 standins = []
457 457 matcher = getstandinmatcher(repo)
458 458 for standin in dirstatewalk(repo.dirstate, matcher):
459 459 lfile = splitstandin(standin)
460 460 standins.append((lfile, readstandin(repo, lfile)))
461 461 return standins
462 462
463 463 def getlfilestoupdate(oldstandins, newstandins):
464 464 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
465 465 filelist = []
466 466 for f in changedstandins:
467 467 if f[0] not in filelist:
468 468 filelist.append(f[0])
469 469 return filelist
tests/test-lfconvert.t
@@ -1,297 +1,338 @@
1 1 $ USERCACHE="$TESTTMP/cache"; export USERCACHE
2 2 $ mkdir "${USERCACHE}"
3 3 $ cat >> $HGRCPATH <<EOF
4 4 > [extensions]
5 5 > largefiles =
6 6 > share =
7 7 > graphlog =
8 8 > mq =
9 > convert =
9 10 > [largefiles]
10 11 > minsize = 0.5
11 12 > patterns = **.other
12 13 > **.dat
13 14 > usercache=${USERCACHE}
14 15 > EOF
15 16
16 17 "lfconvert" works
17 18 $ hg init bigfile-repo
18 19 $ cd bigfile-repo
19 20 $ cat >> .hg/hgrc <<EOF
20 21 > [extensions]
21 22 > largefiles = !
22 23 > EOF
23 24 $ mkdir sub
24 25 $ dd if=/dev/zero bs=1k count=256 > large 2> /dev/null
25 26 $ dd if=/dev/zero bs=1k count=256 > large2 2> /dev/null
26 27 $ echo normal > normal1
27 28 $ echo alsonormal > sub/normal2
28 29 $ dd if=/dev/zero bs=1k count=10 > sub/maybelarge.dat 2> /dev/null
29 30 $ hg addremove
30 31 adding large
31 32 adding large2
32 33 adding normal1
33 34 adding sub/maybelarge.dat
34 35 adding sub/normal2
35 36 $ hg commit -m"add large, normal1" large normal1
36 37 $ hg commit -m"add sub/*" sub
37 38
38 39 Test tag parsing
39 40 $ cat >> .hgtags <<EOF
40 41 > IncorrectlyFormattedTag!
41 42 > invalidhash sometag
42 43 > 0123456789abcdef anothertag
43 44 > EOF
44 45 $ hg add .hgtags
45 46 $ hg commit -m"add large2" large2 .hgtags
46 47
47 48 Test link+rename largefile codepath
48 49 $ [ -d .hg/largefiles ] && echo fail || echo pass
49 50 pass
50 51 $ cd ..
51 52 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo
52 53 initializing destination largefiles-repo
53 54 skipping incorrectly formatted tag IncorrectlyFormattedTag!
54 55 skipping incorrectly formatted id invalidhash
55 56 no mapping for id 0123456789abcdef
56 57 #if symlink
57 58 $ hg --cwd bigfile-repo rename large2 large3
58 59 $ ln -sf large bigfile-repo/large3
59 60 $ hg --cwd bigfile-repo commit -m"make large2 a symlink" large2 large3
60 61 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo-symlink
61 62 initializing destination largefiles-repo-symlink
62 63 skipping incorrectly formatted tag IncorrectlyFormattedTag!
63 64 skipping incorrectly formatted id invalidhash
64 65 no mapping for id 0123456789abcdef
65 66 abort: renamed/copied largefile large3 becomes symlink
66 67 [255]
67 68 #endif
68 69 $ cd bigfile-repo
69 70 $ hg strip --no-backup 2
70 71 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
71 72 $ cd ..
72 73 $ rm -rf largefiles-repo largefiles-repo-symlink
73 74
74 75 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo
75 76 initializing destination largefiles-repo
76 77
77 78 "lfconvert" converts content correctly
78 79 $ cd largefiles-repo
79 80 $ hg up
80 81 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
81 82 getting changed largefiles
82 83 2 largefiles updated, 0 removed
83 84 $ hg locate
84 85 .hglf/large
85 86 .hglf/sub/maybelarge.dat
86 87 normal1
87 88 sub/normal2
88 89 $ cat normal1
89 90 normal
90 91 $ cat sub/normal2
91 92 alsonormal
92 93 $ "$TESTDIR/md5sum.py" large sub/maybelarge.dat
93 94 ec87a838931d4d5d2e94a04644788a55 large
94 95 1276481102f218c981e0324180bafd9f sub/maybelarge.dat
95 96
96 97 "lfconvert" adds 'largefiles' to .hg/requires.
97 98 $ cat .hg/requires
98 99 largefiles
99 100 revlogv1
100 101 fncache
101 102 store
102 103 dotencode
103 104
104 105 "lfconvert" includes a newline at the end of the standin files.
105 106 $ cat .hglf/large .hglf/sub/maybelarge.dat
106 107 2e000fa7e85759c7f4c254d4d9c33ef481e459a7
107 108 34e163be8e43c5631d8b92e9c43ab0bf0fa62b9c
108 109 $ cd ..
109 110
110 111 add some changesets to rename/remove/merge
111 112 $ cd bigfile-repo
112 113 $ hg mv -q sub stuff
113 114 $ hg commit -m"rename sub/ to stuff/"
114 115 $ hg update -q 1
115 116 $ echo blah >> normal3
116 117 $ echo blah >> sub/normal2
117 118 $ echo blah >> sub/maybelarge.dat
118 119 $ "$TESTDIR/md5sum.py" sub/maybelarge.dat
119 120 1dd0b99ff80e19cff409702a1d3f5e15 sub/maybelarge.dat
120 121 $ hg commit -A -m"add normal3, modify sub/*"
121 122 adding normal3
122 123 created new head
123 124 $ hg rm large normal3
124 125 $ hg commit -q -m"remove large, normal3"
125 126 $ hg merge
126 127 merging sub/maybelarge.dat and stuff/maybelarge.dat to stuff/maybelarge.dat
127 128 warning: $TESTTMP/bigfile-repo/stuff/maybelarge.dat looks like a binary file. (glob)
128 129 merging stuff/maybelarge.dat incomplete! (edit conflicts, then use 'hg resolve --mark')
129 130 merging sub/normal2 and stuff/normal2 to stuff/normal2
130 131 0 files updated, 1 files merged, 0 files removed, 1 files unresolved
131 132 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
132 133 [1]
133 134 $ hg cat -r . sub/maybelarge.dat > stuff/maybelarge.dat
134 135 $ hg resolve -m stuff/maybelarge.dat
135 136 $ hg commit -m"merge"
136 137 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
137 138 @ 5:4884f215abda merge
138 139 |\
139 140 | o 4:7285f817b77e remove large, normal3
140 141 | |
141 142 | o 3:67e3892e3534 add normal3, modify sub/*
142 143 | |
143 144 o | 2:c96c8beb5d56 rename sub/ to stuff/
144 145 |/
145 146 o 1:020c65d24e11 add sub/*
146 147 |
147 148 o 0:117b8328f97a add large, normal1
148 149
149 150 $ cd ..
150 151
151 152 lfconvert with rename, merge, and remove
152 153 $ rm -rf largefiles-repo
153 154 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo
154 155 initializing destination largefiles-repo
155 156 $ cd largefiles-repo
156 157 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
157 158 o 5:8e05f5f2b77e merge
158 159 |\
159 160 | o 4:a5a02de7a8e4 remove large, normal3
160 161 | |
161 162 | o 3:55759520c76f add normal3, modify sub/*
162 163 | |
163 164 o | 2:261ad3f3f037 rename sub/ to stuff/
164 165 |/
165 166 o 1:334e5237836d add sub/*
166 167 |
167 168 o 0:d4892ec57ce2 add large, normal1
168 169
169 170 $ hg locate -r 2
170 171 .hglf/large
171 172 .hglf/stuff/maybelarge.dat
172 173 normal1
173 174 stuff/normal2
174 175 $ hg locate -r 3
175 176 .hglf/large
176 177 .hglf/sub/maybelarge.dat
177 178 normal1
178 179 normal3
179 180 sub/normal2
180 181 $ hg locate -r 4
181 182 .hglf/sub/maybelarge.dat
182 183 normal1
183 184 sub/normal2
184 185 $ hg locate -r 5
185 186 .hglf/stuff/maybelarge.dat
186 187 normal1
187 188 stuff/normal2
188 189 $ hg update
189 190 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
190 191 getting changed largefiles
191 192 1 largefiles updated, 0 removed
192 193 $ cat stuff/normal2
193 194 alsonormal
194 195 blah
195 196 $ "$TESTDIR/md5sum.py" stuff/maybelarge.dat
196 197 1dd0b99ff80e19cff409702a1d3f5e15 stuff/maybelarge.dat
197 198 $ cat .hglf/stuff/maybelarge.dat
198 199 76236b6a2c6102826c61af4297dd738fb3b1de38
199 200 $ cd ..
200 201
201 202 "lfconvert" error cases
202 203 $ hg lfconvert http://localhost/foo foo
203 204 abort: http://localhost/foo is not a local Mercurial repo
204 205 [255]
205 206 $ hg lfconvert foo ssh://localhost/foo
206 207 abort: ssh://localhost/foo is not a local Mercurial repo
207 208 [255]
208 209 $ hg lfconvert nosuchrepo foo
209 210 abort: repository nosuchrepo not found!
210 211 [255]
211 212 $ hg share -q -U bigfile-repo shared
212 213 $ printf 'bogus' > shared/.hg/sharedpath
213 214 $ hg lfconvert shared foo
214 215 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/bogus! (glob)
215 216 [255]
216 217 $ hg lfconvert bigfile-repo largefiles-repo
217 218 initializing destination largefiles-repo
218 219 abort: repository largefiles-repo already exists!
219 220 [255]
220 221
221 222 add another largefile to the new largefiles repo
222 223 $ cd largefiles-repo
223 224 $ dd if=/dev/zero bs=1k count=1k > anotherlarge 2> /dev/null
224 225 $ hg add --lfsize=1 anotherlarge
225 226 $ hg commit -m "add anotherlarge (should be a largefile)"
226 227 $ cat .hglf/anotherlarge
227 228 3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3
228 229 $ cd ..
229 230
230 231 round-trip: converting back to a normal (non-largefiles) repo with
231 232 "lfconvert --to-normal" should give the same as ../bigfile-repo
232 233 $ cd largefiles-repo
233 234 $ hg lfconvert --to-normal . ../normal-repo
234 235 initializing destination ../normal-repo
235 236 $ cd ../normal-repo
236 237 $ cat >> .hg/hgrc <<EOF
237 238 > [extensions]
238 239 > largefiles = !
239 240 > EOF
240 241
241 242 # Hmmm: the changeset ID for rev 5 is different from the original
242 243 # normal repo (../bigfile-repo), because the changelog filelist
243 244 # differs between the two incarnations of rev 5: this repo includes
244 245 # 'large' in the list, but ../bigfile-repo does not. Since rev 5
245 246 # removes 'large' relative to the first parent in both repos, it seems
246 247 # to me that lfconvert is doing a *better* job than
247 248 # "hg remove" + "hg merge" + "hg commit".
248 249 # $ hg -R ../bigfile-repo debugdata -c 5
249 250 # $ hg debugdata -c 5
250 251 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
251 252 o 6:1635824e6f59 add anotherlarge (should be a largefile)
252 253 |
253 254 o 5:7215f8deeaaf merge
254 255 |\
255 256 | o 4:7285f817b77e remove large, normal3
256 257 | |
257 258 | o 3:67e3892e3534 add normal3, modify sub/*
258 259 | |
259 260 o | 2:c96c8beb5d56 rename sub/ to stuff/
260 261 |/
261 262 o 1:020c65d24e11 add sub/*
262 263 |
263 264 o 0:117b8328f97a add large, normal1
264 265
265 266 $ hg update
266 267 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
267 268 $ hg locate
268 269 anotherlarge
269 270 normal1
270 271 stuff/maybelarge.dat
271 272 stuff/normal2
272 273 $ [ -d .hg/largefiles ] && echo fail || echo pass
273 274 pass
274 275
275 276 $ cd ..
276 277
278 $ hg convert largefiles-repo
279 assuming destination largefiles-repo-hg
280 initializing destination largefiles-repo-hg repository
281 scanning source...
282 sorting...
283 converting...
284 6 add large, normal1
285 5 add sub/*
286 4 rename sub/ to stuff/
287 3 add normal3, modify sub/*
288 2 remove large, normal3
289 1 merge
290 0 add anotherlarge (should be a largefile)
291
292 $ hg -R largefiles-repo-hg glog --template "{rev}:{node|short} {desc|firstline}\n"
293 o 6:17126745edfd add anotherlarge (should be a largefile)
294 |
295 o 5:9cc5aa7204f0 merge
296 |\
297 | o 4:a5a02de7a8e4 remove large, normal3
298 | |
299 | o 3:55759520c76f add normal3, modify sub/*
300 | |
301 o | 2:261ad3f3f037 rename sub/ to stuff/
302 |/
303 o 1:334e5237836d add sub/*
304 |
305 o 0:d4892ec57ce2 add large, normal1
306
307 $ hg -R largefiles-repo-hg verify --large --lfa
308 checking changesets
309 checking manifests
310 crosschecking files in changesets and manifests
311 checking files
312 8 files, 7 changesets, 12 total revisions
313 searching 7 changesets for largefiles
314 verified existence of 6 revisions of 4 largefiles
315 $ hg -R largefiles-repo-hg showconfig paths
316
317
277 318 Avoid a traceback if a largefile isn't available (issue3519)
278 319
279 320 Ensure the largefile can be cached in the source if necessary
280 321 $ hg clone -U largefiles-repo issue3519
281 322 $ rm "${USERCACHE}"/*
282 323 $ hg lfconvert --to-normal issue3519 normalized3519
283 324 initializing destination normalized3519
284 325
285 326 Ensure the abort message is useful if a largefile is entirely unavailable
286 327 $ rm -rf normalized3519
287 328 $ rm "${USERCACHE}"/*
288 329 $ rm issue3519/.hg/largefiles/*
289 330 $ rm largefiles-repo/.hg/largefiles/*
290 331 $ hg lfconvert --to-normal issue3519 normalized3519
291 332 initializing destination normalized3519
292 333 large: can't get file locally
293 334 (no default or default-push path set in hgrc)
294 335 abort: missing largefile 'large' from revision d4892ec57ce212905215fad1d9018f56b99202ad
295 336 [255]
296 337
297 338