##// END OF EJS Templates
largefiles: fix an explicit largefile commit after a remove (issue4969)...
Matt Harbison -
r27942:eb1135d5 stable
parent child Browse files
Show More
@@ -1,633 +1,637 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import platform
13 13 import stat
14 14 import copy
15 15
16 16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 17 from mercurial.i18n import _
18 18 from mercurial import node, error
19 19
# Directory (relative to the repo root) that holds the "standin" files,
# plus a precomputed slash-suffixed form used for prefix tests, and the
# name used for both the config section and the store directory.
shortname = '.hglf'
shortnameslash = shortname + '/'
longname = 'largefiles'


# -- Private worker functions ------------------------------------------
26 26
def getminsize(ui, assumelfiles, opt, default=10):
    """Resolve the minimum size (in MB) for a file to count as large.

    Precedence: the explicit option value, then the ``[largefiles]
    minsize`` config entry (consulted only when ``assumelfiles`` is
    set, with ``default`` as its fallback).  Returns a float, or
    aborts when no size can be determined or it is not numeric.
    """
    size = opt
    if not size and assumelfiles:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % size)
    if size is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return size
40 40
def link(src, dest):
    """Hardlink src to dest, falling back to an atomic copy.

    Creates dest's parent directories as needed.  On the copy
    fallback, dest also inherits src's permission bits.
    """
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy.
        # Close the source explicitly: the original left the file
        # object to be reclaimed by garbage collection, leaking the
        # descriptor on non-refcounting interpreters.
        with open(src, 'rb') as srcf:
            dst = util.atomictempfile(dest)
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
            dst.close()
        os.chmod(dest, os.stat(src).st_mode)
52 52
def usercachepath(ui, hash):
    '''Return the per-user cache path for the given hash, derived from
    the ``largefiles.usercache`` config or a platform-specific default
    location.  Returns None when no suitable location exists.'''
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return os.path.join(configured, hash)
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname, hash)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches',
                                longname, hash)
    elif os.name == 'posix':
        # XDG base-dir spec first, then the conventional ~/.cache
        xdg = os.getenv('XDG_CACHE_HOME')
        if xdg:
            return os.path.join(xdg, longname, hash)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname, hash)
    else:
        raise error.Abort(_('unknown operating system: %s\n') % os.name)
    return None
78 78
def inusercache(ui, hash):
    '''True-ish when the largefile with this hash exists in the user
    cache (and a cache location is available at all).'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
82 82
def findfile(repo, hash):
    '''Return a usable store path for the largefile with this hash,
    linking it in from the user cache when only cached there.
    Returns None when the file is available nowhere.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        stored = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), stored)
        return stored
    return None
94 94
class largefilesdirstate(dirstate.dirstate):
    # A dirstate whose entry points normalize every incoming path with
    # unixpath() so the lfdirstate always stores slash-separated,
    # normpath-ed names regardless of the caller's platform separators.
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored by the lfdirstate
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        # (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
117 117
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    storedir = repo.join(longname)
    vfs = scmutil.opener(storedir)
    lfdirstate = largefilesdirstate(vfs, ui, repo.root,
                                    repo.dirstate._validate)

    # On the first meaningful largefiles operation in a new clone the
    # dirstate file does not exist yet: seed it from the standins so
    # every known largefile starts out in 'normallookup' state.
    if create and not os.path.exists(os.path.join(storedir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if standins:
            util.makedirs(storedir)

        for fstandin in standins:
            lfdirstate.normallookup(splitstandin(fstandin))
    return lfdirstate
142 142
def lfdirstatestatus(lfdirstate, repo):
    '''Run a full lfdirstate status and resolve every 'unsure' entry by
    comparing the working-copy largefile's hash against its standin in
    the working parent.  Returns the updated status object.'''
    wctx = repo['.']
    matcher = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(matcher, [], False, False, False)
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            s.modified.append(lfile)
        else:
            s.clean.append(lfile)
            # hash matched: record the entry as clean for next time
            lfdirstate.normal(lfile)
    return s
159 159
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
171 171
def instore(repo, hash, forcelocal=False):
    '''True when the largefile with this hash is present in the store
    (the local one only, when forcelocal is set).'''
    return os.path.exists(storepath(repo, hash, forcelocal))
174 174
def storepath(repo, hash, forcelocal=False):
    '''Return the store path for hash.  For a shared repo the primary
    store is in the share source, unless forcelocal requests the
    repo-local store.'''
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)
179 179
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is
    returned.  The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        # Bug fix: this branch used to return a bare path, breaking the
        # documented (path, exists) contract that callers such as
        # findfile() unpack.
        return (storepath(repo, hash, True), True)

    return (path, False)
196 196
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    cached = findfile(repo, hash)
    if cached is None:
        return False
    dest = repo.wjoin(filename)
    util.makedirs(os.path.dirname(dest))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(cached, 'rb') as srcfd:
        with open(dest, 'wb') as destfd:
            gothash = copyandhash(srcfd, destfd)
    if gothash != hash:
        # content didn't hash to what the standin promised: drop it
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, cached, gothash))
        util.unlink(dest)
        return False
    return True
219 219
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing in for file at rev into the store,
    unless it is already there or missing from the working directory.'''
    hash = readstandin(repo, file, rev)
    if not instore(repo, hash):
        absfile = repo.wjoin(file)
        if os.path.exists(absfile):
            copytostoreabsolute(repo, absfile, hash)
        else:
            repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                         (file, hash))
230 230
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx.node(), realfile)
239 239
240 240
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at absolute path file into the store under
    hash (linking from the user cache when possible) and make sure the
    user cache ends up holding it too.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        # Close the source explicitly: the original left the open file
        # to garbage collection, leaking the descriptor.
        with open(file, 'rb') as srcf:
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
252 252
def linktousercache(repo, hash):
    '''Hardlink (or copy) the stored largefile into the user cache,
    when a user cache location is available.'''
    dest = usercachepath(repo.ui, hash)
    if dest:
        link(storepath(repo, hash), dest)
257 257
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    standindir = repo.wjoin(shortname)

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # rebase the caller's patterns under the standin directory
        pats = [os.path.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [standindir]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [standindir], badfn=badfn)
    return match
276 276
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    standinmatchfn = smatcher.matchfn

    def composedmatchfn(f):
        # f must both be a standin and, once stripped of the standin
        # prefix, be accepted by the caller's matcher
        return standinmatchfn(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn
    return smatcher
288 288
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
300 300
def isstandin(filename):
    '''Return true if filename is a big file standin.  filename must be
    in Mercurial's internal form (slash-separated).'''
    # a standin is anything under the '.hglf/' prefix
    return filename.startswith(shortnameslash)
305 305
def splitstandin(filename):
    '''Return the largefile name for a standin path, or None when
    filename is not a standin.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
315 315
def updatestandin(repo, standin):
    '''Re-hash the working-copy largefile behind standin and rewrite the
    standin's content, if the largefile exists in the working directory.'''
    lfile = repo.wjoin(splitstandin(standin))
    if os.path.exists(lfile):
        hash = hashfile(lfile)
        executable = getexecutable(lfile)
        writestandin(repo, standin, hash, executable)
322 322
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    # the standin's content is the hash plus a trailing newline
    return repo[node][standin(filename)].data().strip()
327 327
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
331 331
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = util.sha1('')
    for chunk in instream:
        hasher.update(chunk)
        outfile.write(chunk)
    return hasher.hexdigest()
340 340
def hashrepofile(repo, file):
    '''Return the SHA-1 hex digest of file, resolved relative to the
    repo root (empty string when the file does not exist).'''
    return hashfile(repo.wjoin(file))
343 343
def hashfile(file):
    '''Return the SHA-1 hex digest of the file's content, or the empty
    string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    # use a context manager: the original fd.close() was skipped if
    # reading raised, leaking the descriptor
    with open(file, 'rb') as fd:
        for data in util.filechunkiter(fd, 128 * 1024):
            hasher.update(data)
    return hasher.hexdigest()
353 353
def getexecutable(filename):
    '''Return a truthy value when filename is executable by user, group
    AND other; falsy otherwise.  (The raw and-chain result, not a bool,
    is returned; callers rely only on its truthiness.)'''
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))
359 359
def urljoin(first, second, *arg):
    '''Join URL components, collapsing exactly one slash at each
    boundary (like os.path.join, but always with forward slashes).'''
    def join(left, right):
        # ensure a single '/' between the two pieces
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for piece in arg:
        url = join(url, piece)
    return url
372 372
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = util.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
380 380
def httpsendfile(ui, filename):
    '''Wrap filename (opened binary-read) in an httpsendfile object for
    upload over HTTP.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
383 383
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
387 387
def islfilesrepo(repo):
    '''True when this repo actually uses largefiles: the requirement is
    set and either the store holds a standin or the lfdirstate is
    non-empty.'''
    if 'largefiles' in repo.requirements:
        for entry in repo.store.datafiles():
            if shortnameslash in entry[0]:
                return True

    return any(openlfdirstate(repo.ui, repo, False))
394 394
class storeprotonotcapable(Exception):
    '''Raised when no available store supports any of the required
    store types; carries the list of types that were requested.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
398 398
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every tracked standin;
    hash is None when the standin cannot be read.'''
    matcher = getstandinmatcher(repo)
    result = []
    for fstandin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(fstandin)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        result.append((lfile, hash))
    return result
410 410
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    # Mirror the repo-dirstate state of lfile's standin into the
    # lfdirstate, so both track the same add/remove/normal status.
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # raw dirstate tuple: [0] is the state char, [3] the mtime
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        # standin untracked: treat as unknown
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not os.path.exists(repo.wjoin(lfile))):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
433 433
def markcommitted(orig, ctx, node):
    '''Wrapper around ctx.markcommitted() that additionally syncs the
    lfdirstate for every committed standin and copies the committed
    largefiles into the store.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            synclfdirstate(repo, lfdirstate, splitstandin(f), False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)
457 457
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the names of largefiles whose (name, hash) entry differs
    between the two standin state lists (added, removed, or rehashed).'''
    changed = set(oldstandins) ^ set(newstandins)
    filelist = []
    for entry in changed:
        # de-duplicate while keeping first-seen order
        if entry[0] not in filelist:
            filelist.append(entry[0])
    return filelist
465 465
def getlfilestoupload(repo, missing, addfunc):
    '''For every revision in missing, find the standins it touches and
    feed (standin, hash) pairs to addfunc, with ui progress output.'''
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revision'), total=len(missing))
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]

        # read ctx with largefiles status processing disabled
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            # merge commit: ctx.files() is incomplete, so add files that
            # were dropped relative to either parent or differ from both
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for mp in (mp1, mp2):
                for f in mp:
                    if f not in mc:
                        files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
497 497
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.

    (The source text contained interleaved pre-/post-diff lines for the
    standin/largefile filtering loop; this is the resolved, committed
    version, which keeps a standin-removal commit working even when the
    plain largefile name is unknown to the dirstate — issue4969.)
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = match_.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                if os.path.exists(
                        repo.wjoin(standin(lfile))):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if os.path.exists(repo.wjoin(lfile)):
                        updatestandin(repo,
                                      standin(lfile))

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files.  It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed.  No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins.  Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user.  Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove).  In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
596 600
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        # only the first commit after resuming refreshes standins
        self.resuming = False
        return updatestandinsbymatch(repo, match)
617 621
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE
@@ -1,231 +1,231 b''
1 1 Create user cache directory
2 2
3 3 $ USERCACHE=`pwd`/cache; export USERCACHE
4 4 $ cat <<EOF >> ${HGRCPATH}
5 5 > [extensions]
6 6 > hgext.largefiles=
7 7 > [largefiles]
8 8 > usercache=${USERCACHE}
9 9 > EOF
10 10 $ mkdir -p ${USERCACHE}
11 11
12 12 Create source repo, and commit adding largefile.
13 13
14 14 $ hg init src
15 15 $ cd src
16 16 $ echo large > large
17 17 $ hg add --large large
18 18 $ hg commit -m 'add largefile'
19 19 $ hg rm large
20 $ hg commit -m 'branchhead without largefile'
20 $ hg commit -m 'branchhead without largefile' large
21 21 $ hg up -qr 0
22 22 $ cd ..
23 23
24 24 Discard all cached largefiles in USERCACHE
25 25
26 26 $ rm -rf ${USERCACHE}
27 27
28 28 Create mirror repo, and pull from source without largefile:
29 29 "pull" is used instead of "clone" for suppression of (1) updating to
30 30 tip (= caching largefile from source repo), and (2) recording source
31 31 repo as "default" path in .hg/hgrc.
32 32
33 33 $ hg init mirror
34 34 $ cd mirror
35 35 $ hg pull ../src
36 36 pulling from ../src
37 37 requesting all changes
38 38 adding changesets
39 39 adding manifests
40 40 adding file changes
41 41 added 2 changesets with 1 changes to 1 files
42 42 (run 'hg update' to get a working copy)
43 43
44 44 Update working directory to "tip", which requires largefile("large"),
45 45 but there is no cache file for it. So, hg must treat it as
46 46 "missing"(!) file.
47 47
48 48 $ hg update -r0
49 49 getting changed largefiles
50 50 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
51 51 0 largefiles updated, 0 removed
52 52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 53 $ hg status
54 54 ! large
55 55
56 56 Update working directory to null: this cleanup .hg/largefiles/dirstate
57 57
58 58 $ hg update null
59 59 getting changed largefiles
60 60 0 largefiles updated, 0 removed
61 61 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
62 62
63 63 Update working directory to tip, again.
64 64
65 65 $ hg update -r0
66 66 getting changed largefiles
67 67 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
68 68 0 largefiles updated, 0 removed
69 69 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
70 70 $ hg status
71 71 ! large
72 72 $ cd ..
73 73
74 74 Verify that largefiles from pulled branchheads are fetched, also to an empty repo
75 75
76 76 $ hg init mirror2
77 77 $ hg -R mirror2 pull src -r0
78 78 pulling from src
79 79 adding changesets
80 80 adding manifests
81 81 adding file changes
82 82 added 1 changesets with 1 changes to 1 files
83 83 (run 'hg update' to get a working copy)
84 84
85 85 #if unix-permissions
86 86
87 87 Portable way to print file permissions:
88 88
89 89 $ cat > ls-l.py <<EOF
90 90 > #!/usr/bin/env python
91 91 > import sys, os
92 92 > path = sys.argv[1]
93 93 > print '%03o' % (os.lstat(path).st_mode & 0777)
94 94 > EOF
95 95 $ chmod +x ls-l.py
96 96
97 97 Test that files in .hg/largefiles inherit mode from .hg/store, not
98 98 from file in working copy:
99 99
100 100 $ cd src
101 101 $ chmod 750 .hg/store
102 102 $ chmod 660 large
103 103 $ echo change >> large
104 104 $ hg commit -m change
105 105 created new head
106 106 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
107 107 640
108 108
109 109 Test permission of with files in .hg/largefiles created by update:
110 110
111 111 $ cd ../mirror
112 112 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
113 113 $ chmod 750 .hg/store
114 114 $ hg pull ../src --update -q
115 115 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
116 116 640
117 117
118 118 Test permission of files created by push:
119 119
120 120 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
121 121 > --config "web.allow_push=*" --config web.push_ssl=no
122 122 $ cat hg.pid >> $DAEMON_PIDS
123 123
124 124 $ echo change >> large
125 125 $ hg commit -m change
126 126
127 127 $ rm -r "$USERCACHE"
128 128
129 129 $ hg push -q http://localhost:$HGPORT/
130 130
131 131 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
132 132 640
133 133
134 134 $ cd ..
135 135
136 136 #endif
137 137
138 138 Test issue 4053 (remove --after on a deleted, uncommitted file shouldn't say
139 139 it is missing, but a remove on a nonexistent unknown file still should. Same
140 140 for a forget.)
141 141
142 142 $ cd src
143 143 $ touch x
144 144 $ hg add x
145 145 $ mv x y
146 146 $ hg remove -A x y ENOENT
147 147 ENOENT: * (glob)
148 148 not removing y: file is untracked
149 149 [1]
150 150 $ hg add y
151 151 $ mv y z
152 152 $ hg forget y z ENOENT
153 153 ENOENT: * (glob)
154 154 not removing z: file is already untracked
155 155 [1]
156 156
157 157 Largefiles are accessible from the share's store
158 158 $ cd ..
159 159 $ hg share -q src share_dst --config extensions.share=
160 160 $ hg -R share_dst update -r0
161 161 getting changed largefiles
162 162 1 largefiles updated, 0 removed
163 163 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
164 164
165 165 $ echo modified > share_dst/large
166 166 $ hg -R share_dst ci -m modified
167 167 created new head
168 168
169 169 Only dirstate is in the local store for the share, and the largefile is in the
170 170 share source's local store. Avoid the extra largefiles added in the unix
171 171 conditional above.
172 172 $ hash=`hg -R share_dst cat share_dst/.hglf/large`
173 173 $ echo $hash
174 174 e2fb5f2139d086ded2cb600d5a91a196e76bf020
175 175
176 176 $ find share_dst/.hg/largefiles/* | sort
177 177 share_dst/.hg/largefiles/dirstate
178 178
179 179 $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
180 180 src/.hg/largefiles/dirstate
181 181 src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
182 182
183 183 Inject corruption into the largefiles store and see how update handles that:
184 184
185 185 $ cd src
186 186 $ hg up -qC
187 187 $ cat large
188 188 modified
189 189 $ rm large
190 190 $ cat .hglf/large
191 191 e2fb5f2139d086ded2cb600d5a91a196e76bf020
192 192 $ mv .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 ..
193 193 $ echo corruption > .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
194 194 $ hg up -C
195 195 getting changed largefiles
196 196 large: data corruption in $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 with hash 6a7bb2556144babe3899b25e5428123735bb1e27 (glob)
197 197 0 largefiles updated, 0 removed
198 198 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
199 199 $ hg st
200 200 ! large
201 201 ? z
202 202 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
203 203
204 204 #if serve
205 205
206 206 Test coverage of error handling from putlfile:
207 207
208 208 $ mkdir $TESTTMP/mirrorcache
209 209 $ hg serve -R ../mirror -d -p $HGPORT1 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache
210 210 $ cat hg.pid >> $DAEMON_PIDS
211 211
212 212 $ hg push http://localhost:$HGPORT1 -f --config files.usercache=nocache
213 213 pushing to http://localhost:$HGPORT1/
214 214 searching for changes
215 215 abort: remotestore: could not open file $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020: HTTP Error 403: ssl required
216 216 [255]
217 217
218 218 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
219 219
220 220 Test coverage of 'missing from store':
221 221
222 222 $ hg serve -R ../mirror -d -p $HGPORT2 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache --config "web.allow_push=*" --config web.push_ssl=no
223 223 $ cat hg.pid >> $DAEMON_PIDS
224 224
225 225 $ hg push http://localhost:$HGPORT2 -f --config largefiles.usercache=nocache
226 226 pushing to http://localhost:$HGPORT2/
227 227 searching for changes
228 228 abort: largefile e2fb5f2139d086ded2cb600d5a91a196e76bf020 missing from store (needs to be uploaded)
229 229 [255]
230 230
231 231 #endif
General Comments 0
You need to be logged in to leave comments. Login now