##// END OF EJS Templates
largefiles: check hash of files in the store before copying to working dir...
Mads Kiilerich -
r26823:45e8bd2f stable
parent child Browse files
Show More
@@ -1,621 +1,628 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import platform
13 import shutil
14 13 import stat
15 14 import copy
16 15
17 16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 17 from mercurial.i18n import _
19 18 from mercurial import node, error
20 19
21 20 shortname = '.hglf'
22 21 shortnameslash = shortname + '/'
23 22 longname = 'largefiles'
24 23
25 24
26 25 # -- Private worker functions ------------------------------------------
27 26
28 27 def getminsize(ui, assumelfiles, opt, default=10):
29 28 lfsize = opt
30 29 if not lfsize and assumelfiles:
31 30 lfsize = ui.config(longname, 'minsize', default=default)
32 31 if lfsize:
33 32 try:
34 33 lfsize = float(lfsize)
35 34 except ValueError:
36 35 raise error.Abort(_('largefiles: size must be number (not %s)\n')
37 36 % lfsize)
38 37 if lfsize is None:
39 38 raise error.Abort(_('minimum size for largefiles must be specified'))
40 39 return lfsize
41 40
42 41 def link(src, dest):
43 42 util.makedirs(os.path.dirname(dest))
44 43 try:
45 44 util.oslink(src, dest)
46 45 except OSError:
47 46 # if hardlinks fail, fallback on atomic copy
48 47 dst = util.atomictempfile(dest)
49 48 for chunk in util.filechunkiter(open(src, 'rb')):
50 49 dst.write(chunk)
51 50 dst.close()
52 51 os.chmod(dest, os.stat(src).st_mode)
53 52
54 53 def usercachepath(ui, hash):
55 54 path = ui.configpath(longname, 'usercache', None)
56 55 if path:
57 56 path = os.path.join(path, hash)
58 57 else:
59 58 if os.name == 'nt':
60 59 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
61 60 if appdata:
62 61 path = os.path.join(appdata, longname, hash)
63 62 elif platform.system() == 'Darwin':
64 63 home = os.getenv('HOME')
65 64 if home:
66 65 path = os.path.join(home, 'Library', 'Caches',
67 66 longname, hash)
68 67 elif os.name == 'posix':
69 68 path = os.getenv('XDG_CACHE_HOME')
70 69 if path:
71 70 path = os.path.join(path, longname, hash)
72 71 else:
73 72 home = os.getenv('HOME')
74 73 if home:
75 74 path = os.path.join(home, '.cache', longname, hash)
76 75 else:
77 76 raise error.Abort(_('unknown operating system: %s\n') % os.name)
78 77 return path
79 78
80 79 def inusercache(ui, hash):
81 80 path = usercachepath(ui, hash)
82 81 return path and os.path.exists(path)
83 82
84 83 def findfile(repo, hash):
85 84 path, exists = findstorepath(repo, hash)
86 85 if exists:
87 86 repo.ui.note(_('found %s in store\n') % hash)
88 87 return path
89 88 elif inusercache(repo.ui, hash):
90 89 repo.ui.note(_('found %s in system cache\n') % hash)
91 90 path = storepath(repo, hash)
92 91 link(usercachepath(repo.ui, hash), path)
93 92 return path
94 93 return None
95 94
96 95 class largefilesdirstate(dirstate.dirstate):
97 96 def __getitem__(self, key):
98 97 return super(largefilesdirstate, self).__getitem__(unixpath(key))
99 98 def normal(self, f):
100 99 return super(largefilesdirstate, self).normal(unixpath(f))
101 100 def remove(self, f):
102 101 return super(largefilesdirstate, self).remove(unixpath(f))
103 102 def add(self, f):
104 103 return super(largefilesdirstate, self).add(unixpath(f))
105 104 def drop(self, f):
106 105 return super(largefilesdirstate, self).drop(unixpath(f))
107 106 def forget(self, f):
108 107 return super(largefilesdirstate, self).forget(unixpath(f))
109 108 def normallookup(self, f):
110 109 return super(largefilesdirstate, self).normallookup(unixpath(f))
111 110 def _ignore(self, f):
112 111 return False
113 112 def write(self, tr=False):
114 113 # (1) disable PENDING mode always
115 114 # (lfdirstate isn't yet managed as a part of the transaction)
116 115 # (2) avoid develwarn 'use dirstate.write with ....'
117 116 super(largefilesdirstate, self).write(None)
118 117
119 118 def openlfdirstate(ui, repo, create=True):
120 119 '''
121 120 Return a dirstate object that tracks largefiles: i.e. its root is
122 121 the repo root, but it is saved in .hg/largefiles/dirstate.
123 122 '''
124 123 lfstoredir = repo.join(longname)
125 124 opener = scmutil.opener(lfstoredir)
126 125 lfdirstate = largefilesdirstate(opener, ui, repo.root,
127 126 repo.dirstate._validate)
128 127
129 128 # If the largefiles dirstate does not exist, populate and create
130 129 # it. This ensures that we create it on the first meaningful
131 130 # largefiles operation in a new clone.
132 131 if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
133 132 matcher = getstandinmatcher(repo)
134 133 standins = repo.dirstate.walk(matcher, [], False, False)
135 134
136 135 if len(standins) > 0:
137 136 util.makedirs(lfstoredir)
138 137
139 138 for standin in standins:
140 139 lfile = splitstandin(standin)
141 140 lfdirstate.normallookup(lfile)
142 141 return lfdirstate
143 142
144 143 def lfdirstatestatus(lfdirstate, repo):
145 144 wctx = repo['.']
146 145 match = match_.always(repo.root, repo.getcwd())
147 146 unsure, s = lfdirstate.status(match, [], False, False, False)
148 147 modified, clean = s.modified, s.clean
149 148 for lfile in unsure:
150 149 try:
151 150 fctx = wctx[standin(lfile)]
152 151 except LookupError:
153 152 fctx = None
154 153 if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
155 154 modified.append(lfile)
156 155 else:
157 156 clean.append(lfile)
158 157 lfdirstate.normal(lfile)
159 158 return s
160 159
161 160 def listlfiles(repo, rev=None, matcher=None):
162 161 '''return a list of largefiles in the working copy or the
163 162 specified changeset'''
164 163
165 164 if matcher is None:
166 165 matcher = getstandinmatcher(repo)
167 166
168 167 # ignore unknown files in working directory
169 168 return [splitstandin(f)
170 169 for f in repo[rev].walk(matcher)
171 170 if rev is not None or repo.dirstate[f] != '?']
172 171
173 172 def instore(repo, hash, forcelocal=False):
174 173 return os.path.exists(storepath(repo, hash, forcelocal))
175 174
176 175 def storepath(repo, hash, forcelocal=False):
177 176 if not forcelocal and repo.shared():
178 177 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
179 178 return repo.join(longname, hash)
180 179
181 180 def findstorepath(repo, hash):
182 181 '''Search through the local store path(s) to find the file for the given
183 182 hash. If the file is not found, its path in the primary store is returned.
184 183 The return value is a tuple of (path, exists(path)).
185 184 '''
186 185 # For shared repos, the primary store is in the share source. But for
187 186 # backward compatibility, force a lookup in the local store if it wasn't
188 187 # found in the share source.
189 188 path = storepath(repo, hash, False)
190 189
191 190 if instore(repo, hash):
192 191 return (path, True)
193 192 elif repo.shared() and instore(repo, hash, True):
194 193 return storepath(repo, hash, True)
195 194
196 195 return (path, False)
197 196
198 197 def copyfromcache(repo, hash, filename):
199 198 '''Copy the specified largefile from the repo or system cache to
200 199 filename in the repository. Return true on success or false if the
201 200 file was not found in either cache (which should not happen:
202 201 this is meant to be called only after ensuring that the needed
203 202 largefile exists in the cache).'''
204 203 path = findfile(repo, hash)
205 204 if path is None:
206 205 return False
207 206 util.makedirs(os.path.dirname(repo.wjoin(filename)))
208 207 # The write may fail before the file is fully written, but we
209 208 # don't use atomic writes in the working copy.
210 shutil.copy(path, repo.wjoin(filename))
209 dest = repo.wjoin(filename)
210 with open(path, 'rb') as srcfd:
211 with open(dest, 'wb') as destfd:
212 gothash = copyandhash(srcfd, destfd)
213 if gothash != hash:
214 repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
215 % (filename, path, gothash))
216 util.unlink(dest)
217 return False
211 218 return True
212 219
213 220 def copytostore(repo, rev, file, uploaded=False):
214 221 hash = readstandin(repo, file, rev)
215 222 if instore(repo, hash):
216 223 return
217 224 copytostoreabsolute(repo, repo.wjoin(file), hash)
218 225
219 226 def copyalltostore(repo, node):
220 227 '''Copy all largefiles in a given revision to the store'''
221 228
222 229 ctx = repo[node]
223 230 for filename in ctx.files():
224 231 if isstandin(filename) and filename in ctx.manifest():
225 232 realfile = splitstandin(filename)
226 233 copytostore(repo, ctx.node(), realfile)
227 234
228 235
229 236 def copytostoreabsolute(repo, file, hash):
230 237 if inusercache(repo.ui, hash):
231 238 link(usercachepath(repo.ui, hash), storepath(repo, hash))
232 239 else:
233 240 util.makedirs(os.path.dirname(storepath(repo, hash)))
234 241 dst = util.atomictempfile(storepath(repo, hash),
235 242 createmode=repo.store.createmode)
236 243 for chunk in util.filechunkiter(open(file, 'rb')):
237 244 dst.write(chunk)
238 245 dst.close()
239 246 linktousercache(repo, hash)
240 247
241 248 def linktousercache(repo, hash):
242 249 path = usercachepath(repo.ui, hash)
243 250 if path:
244 251 link(storepath(repo, hash), path)
245 252
246 253 def getstandinmatcher(repo, rmatcher=None):
247 254 '''Return a match object that applies rmatcher to the standin directory'''
248 255 standindir = repo.wjoin(shortname)
249 256
250 257 # no warnings about missing files or directories
251 258 badfn = lambda f, msg: None
252 259
253 260 if rmatcher and not rmatcher.always():
254 261 pats = [os.path.join(standindir, pat) for pat in rmatcher.files()]
255 262 if not pats:
256 263 pats = [standindir]
257 264 match = scmutil.match(repo[None], pats, badfn=badfn)
258 265 # if pats is empty, it would incorrectly always match, so clear _always
259 266 match._always = False
260 267 else:
261 268 # no patterns: relative to repo root
262 269 match = scmutil.match(repo[None], [standindir], badfn=badfn)
263 270 return match
264 271
265 272 def composestandinmatcher(repo, rmatcher):
266 273 '''Return a matcher that accepts standins corresponding to the
267 274 files accepted by rmatcher. Pass the list of files in the matcher
268 275 as the paths specified by the user.'''
269 276 smatcher = getstandinmatcher(repo, rmatcher)
270 277 isstandin = smatcher.matchfn
271 278 def composedmatchfn(f):
272 279 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
273 280 smatcher.matchfn = composedmatchfn
274 281
275 282 return smatcher
276 283
277 284 def standin(filename):
278 285 '''Return the repo-relative path to the standin for the specified big
279 286 file.'''
280 287 # Notes:
281 288 # 1) Some callers want an absolute path, but for instance addlargefiles
282 289 # needs it repo-relative so it can be passed to repo[None].add(). So
283 290 # leave it up to the caller to use repo.wjoin() to get an absolute path.
284 291 # 2) Join with '/' because that's what dirstate always uses, even on
285 292 # Windows. Change existing separator to '/' first in case we are
286 293 # passed filenames from an external source (like the command line).
287 294 return shortnameslash + util.pconvert(filename)
288 295
289 296 def isstandin(filename):
290 297 '''Return true if filename is a big file standin. filename must be
291 298 in Mercurial's internal form (slash-separated).'''
292 299 return filename.startswith(shortnameslash)
293 300
294 301 def splitstandin(filename):
295 302 # Split on / because that's what dirstate always uses, even on Windows.
296 303 # Change local separator to / first just in case we are passed filenames
297 304 # from an external source (like the command line).
298 305 bits = util.pconvert(filename).split('/', 1)
299 306 if len(bits) == 2 and bits[0] == shortname:
300 307 return bits[1]
301 308 else:
302 309 return None
303 310
304 311 def updatestandin(repo, standin):
305 312 file = repo.wjoin(splitstandin(standin))
306 313 if os.path.exists(file):
307 314 hash = hashfile(file)
308 315 executable = getexecutable(file)
309 316 writestandin(repo, standin, hash, executable)
310 317
311 318 def readstandin(repo, filename, node=None):
312 319 '''read hex hash from standin for filename at given node, or working
313 320 directory if no node is given'''
314 321 return repo[node][standin(filename)].data().strip()
315 322
316 323 def writestandin(repo, standin, hash, executable):
317 324 '''write hash to <repo.root>/<standin>'''
318 325 repo.wwrite(standin, hash + '\n', executable and 'x' or '')
319 326
320 327 def copyandhash(instream, outfile):
321 328 '''Read bytes from instream (iterable) and write them to outfile,
322 329 computing the SHA-1 hash of the data along the way. Return the hash.'''
323 330 hasher = util.sha1('')
324 331 for data in instream:
325 332 hasher.update(data)
326 333 outfile.write(data)
327 334 return hasher.hexdigest()
328 335
329 336 def hashrepofile(repo, file):
330 337 return hashfile(repo.wjoin(file))
331 338
332 339 def hashfile(file):
333 340 if not os.path.exists(file):
334 341 return ''
335 342 hasher = util.sha1('')
336 343 fd = open(file, 'rb')
337 344 for data in util.filechunkiter(fd, 128 * 1024):
338 345 hasher.update(data)
339 346 fd.close()
340 347 return hasher.hexdigest()
341 348
342 349 def getexecutable(filename):
343 350 mode = os.stat(filename).st_mode
344 351 return ((mode & stat.S_IXUSR) and
345 352 (mode & stat.S_IXGRP) and
346 353 (mode & stat.S_IXOTH))
347 354
348 355 def urljoin(first, second, *arg):
349 356 def join(left, right):
350 357 if not left.endswith('/'):
351 358 left += '/'
352 359 if right.startswith('/'):
353 360 right = right[1:]
354 361 return left + right
355 362
356 363 url = join(first, second)
357 364 for a in arg:
358 365 url = join(url, a)
359 366 return url
360 367
361 368 def hexsha1(data):
362 369 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
363 370 object data"""
364 371 h = util.sha1()
365 372 for chunk in util.filechunkiter(data):
366 373 h.update(chunk)
367 374 return h.hexdigest()
368 375
369 376 def httpsendfile(ui, filename):
370 377 return httpconnection.httpsendfile(ui, filename, 'rb')
371 378
372 379 def unixpath(path):
373 380 '''Return a version of path normalized for use with the lfdirstate.'''
374 381 return util.pconvert(os.path.normpath(path))
375 382
376 383 def islfilesrepo(repo):
377 384 if ('largefiles' in repo.requirements and
378 385 any(shortnameslash in f[0] for f in repo.store.datafiles())):
379 386 return True
380 387
381 388 return any(openlfdirstate(repo.ui, repo, False))
382 389
383 390 class storeprotonotcapable(Exception):
384 391 def __init__(self, storetypes):
385 392 self.storetypes = storetypes
386 393
387 394 def getstandinsstate(repo):
388 395 standins = []
389 396 matcher = getstandinmatcher(repo)
390 397 for standin in repo.dirstate.walk(matcher, [], False, False):
391 398 lfile = splitstandin(standin)
392 399 try:
393 400 hash = readstandin(repo, lfile)
394 401 except IOError:
395 402 hash = None
396 403 standins.append((lfile, hash))
397 404 return standins
398 405
399 406 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
400 407 lfstandin = standin(lfile)
401 408 if lfstandin in repo.dirstate:
402 409 stat = repo.dirstate._map[lfstandin]
403 410 state, mtime = stat[0], stat[3]
404 411 else:
405 412 state, mtime = '?', -1
406 413 if state == 'n':
407 414 if (normallookup or mtime < 0 or
408 415 not os.path.exists(repo.wjoin(lfile))):
409 416 # state 'n' doesn't ensure 'clean' in this case
410 417 lfdirstate.normallookup(lfile)
411 418 else:
412 419 lfdirstate.normal(lfile)
413 420 elif state == 'm':
414 421 lfdirstate.normallookup(lfile)
415 422 elif state == 'r':
416 423 lfdirstate.remove(lfile)
417 424 elif state == 'a':
418 425 lfdirstate.add(lfile)
419 426 elif state == '?':
420 427 lfdirstate.drop(lfile)
421 428
422 429 def markcommitted(orig, ctx, node):
423 430 repo = ctx.repo()
424 431
425 432 orig(node)
426 433
427 434 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
428 435 # because files coming from the 2nd parent are omitted in the latter.
429 436 #
430 437 # The former should be used to get targets of "synclfdirstate",
431 438 # because such files:
432 439 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
433 440 # - have to be marked as "n" after commit, but
434 441 # - aren't listed in "repo[node].files()"
435 442
436 443 lfdirstate = openlfdirstate(repo.ui, repo)
437 444 for f in ctx.files():
438 445 if isstandin(f):
439 446 lfile = splitstandin(f)
440 447 synclfdirstate(repo, lfdirstate, lfile, False)
441 448 lfdirstate.write()
442 449
443 450 # As part of committing, copy all of the largefiles into the cache.
444 451 copyalltostore(repo, node)
445 452
446 453 def getlfilestoupdate(oldstandins, newstandins):
447 454 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
448 455 filelist = []
449 456 for f in changedstandins:
450 457 if f[0] not in filelist:
451 458 filelist.append(f[0])
452 459 return filelist
453 460
454 461 def getlfilestoupload(repo, missing, addfunc):
455 462 for i, n in enumerate(missing):
456 463 repo.ui.progress(_('finding outgoing largefiles'), i,
457 464 unit=_('revision'), total=len(missing))
458 465 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
459 466
460 467 oldlfstatus = repo.lfstatus
461 468 repo.lfstatus = False
462 469 try:
463 470 ctx = repo[n]
464 471 finally:
465 472 repo.lfstatus = oldlfstatus
466 473
467 474 files = set(ctx.files())
468 475 if len(parents) == 2:
469 476 mc = ctx.manifest()
470 477 mp1 = ctx.parents()[0].manifest()
471 478 mp2 = ctx.parents()[1].manifest()
472 479 for f in mp1:
473 480 if f not in mc:
474 481 files.add(f)
475 482 for f in mp2:
476 483 if f not in mc:
477 484 files.add(f)
478 485 for f in mc:
479 486 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
480 487 files.add(f)
481 488 for fn in files:
482 489 if isstandin(fn) and fn in ctx:
483 490 addfunc(fn, ctx[fn].data().strip())
484 491 repo.ui.progress(_('finding outgoing largefiles'), None)
485 492
486 493 def updatestandinsbymatch(repo, match):
487 494 '''Update standins in the working directory according to specified match
488 495
489 496 This returns (possibly modified) ``match`` object to be used for
490 497 subsequent commit process.
491 498 '''
492 499
493 500 ui = repo.ui
494 501
495 502 # Case 1: user calls commit with no specific files or
496 503 # include/exclude patterns: refresh and commit all files that
497 504 # are "dirty".
498 505 if match is None or match.always():
499 506 # Spend a bit of time here to get a list of files we know
500 507 # are modified so we can compare only against those.
501 508 # It can cost a lot of time (several seconds)
502 509 # otherwise to update all standins if the largefiles are
503 510 # large.
504 511 lfdirstate = openlfdirstate(ui, repo)
505 512 dirtymatch = match_.always(repo.root, repo.getcwd())
506 513 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
507 514 False)
508 515 modifiedfiles = unsure + s.modified + s.added + s.removed
509 516 lfiles = listlfiles(repo)
510 517 # this only loops through largefiles that exist (not
511 518 # removed/renamed)
512 519 for lfile in lfiles:
513 520 if lfile in modifiedfiles:
514 521 if os.path.exists(
515 522 repo.wjoin(standin(lfile))):
516 523 # this handles the case where a rebase is being
517 524 # performed and the working copy is not updated
518 525 # yet.
519 526 if os.path.exists(repo.wjoin(lfile)):
520 527 updatestandin(repo,
521 528 standin(lfile))
522 529
523 530 return match
524 531
525 532 lfiles = listlfiles(repo)
526 533 match._files = repo._subdirlfs(match.files(), lfiles)
527 534
528 535 # Case 2: user calls commit with specified patterns: refresh
529 536 # any matching big files.
530 537 smatcher = composestandinmatcher(repo, match)
531 538 standins = repo.dirstate.walk(smatcher, [], False, False)
532 539
533 540 # No matching big files: get out of the way and pass control to
534 541 # the usual commit() method.
535 542 if not standins:
536 543 return match
537 544
538 545 # Refresh all matching big files. It's possible that the
539 546 # commit will end up failing, in which case the big files will
540 547 # stay refreshed. No harm done: the user modified them and
541 548 # asked to commit them, so sooner or later we're going to
542 549 # refresh the standins. Might as well leave them refreshed.
543 550 lfdirstate = openlfdirstate(ui, repo)
544 551 for fstandin in standins:
545 552 lfile = splitstandin(fstandin)
546 553 if lfdirstate[lfile] != 'r':
547 554 updatestandin(repo, fstandin)
548 555
549 556 # Cook up a new matcher that only matches regular files or
550 557 # standins corresponding to the big files requested by the
551 558 # user. Have to modify _files to prevent commit() from
552 559 # complaining "not tracked" for big files.
553 560 match = copy.copy(match)
554 561 origmatchfn = match.matchfn
555 562
556 563 # Check both the list of largefiles and the list of
557 564 # standins because if a largefile was removed, it
558 565 # won't be in the list of largefiles at this point
559 566 match._files += sorted(standins)
560 567
561 568 actualfiles = []
562 569 for f in match._files:
563 570 fstandin = standin(f)
564 571
565 572 # For largefiles, only one of the normal and standin should be
566 573 # committed (except if one of them is a remove).
567 574 # Thus, skip plain largefile names but keep the standin.
568 575 if (f in lfiles or fstandin in standins) and \
569 576 repo.dirstate[f] != 'r' and repo.dirstate[fstandin] != 'r':
570 577 continue
571 578
572 579 actualfiles.append(f)
573 580 match._files = actualfiles
574 581
575 582 def matchfn(f):
576 583 if origmatchfn(f):
577 584 return f not in lfiles
578 585 else:
579 586 return f in standins
580 587
581 588 match.matchfn = matchfn
582 589
583 590 return match
584 591
585 592 class automatedcommithook(object):
586 593 '''Stateful hook to update standins at the 1st commit of resuming
587 594
588 595 For efficiency, updating standins in the working directory should
589 596 be avoided while automated committing (like rebase, transplant and
590 597 so on), because they should be updated before committing.
591 598
592 599 But the 1st commit of resuming automated committing (e.g. ``rebase
593 600 --continue``) should update them, because largefiles may be
594 601 modified manually.
595 602 '''
596 603 def __init__(self, resuming):
597 604 self.resuming = resuming
598 605
599 606 def __call__(self, repo, match):
600 607 if self.resuming:
601 608 self.resuming = False # avoids updating at subsequent commits
602 609 return updatestandinsbymatch(repo, match)
603 610 else:
604 611 return match
605 612
606 613 def getstatuswriter(ui, repo, forcibly=None):
607 614 '''Return the function to write largefiles specific status out
608 615
609 616 If ``forcibly`` is ``None``, this returns the last element of
610 617 ``repo._lfstatuswriters`` as "default" writer function.
611 618
612 619 Otherwise, this returns the function to always write out (or
613 620 ignore if ``not forcibly``) status.
614 621 '''
615 622 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
616 623 return repo._lfstatuswriters[-1]
617 624 else:
618 625 if forcibly:
619 626 return ui.status # forcibly WRITE OUT
620 627 else:
621 628 return lambda *msg, **opts: None # forcibly IGNORE
@@ -1,203 +1,202 b''
1 1 Create user cache directory
2 2
3 3 $ USERCACHE=`pwd`/cache; export USERCACHE
4 4 $ cat <<EOF >> ${HGRCPATH}
5 5 > [extensions]
6 6 > hgext.largefiles=
7 7 > [largefiles]
8 8 > usercache=${USERCACHE}
9 9 > EOF
10 10 $ mkdir -p ${USERCACHE}
11 11
12 12 Create source repo, and commit adding largefile.
13 13
14 14 $ hg init src
15 15 $ cd src
16 16 $ echo large > large
17 17 $ hg add --large large
18 18 $ hg commit -m 'add largefile'
19 19 $ hg rm large
20 20 $ hg commit -m 'branchhead without largefile'
21 21 $ hg up -qr 0
22 22 $ cd ..
23 23
24 24 Discard all cached largefiles in USERCACHE
25 25
26 26 $ rm -rf ${USERCACHE}
27 27
28 28 Create mirror repo, and pull from source without largefile:
29 29 "pull" is used instead of "clone" for suppression of (1) updating to
30 30 tip (= caching largefile from source repo), and (2) recording source
31 31 repo as "default" path in .hg/hgrc.
32 32
33 33 $ hg init mirror
34 34 $ cd mirror
35 35 $ hg pull ../src
36 36 pulling from ../src
37 37 requesting all changes
38 38 adding changesets
39 39 adding manifests
40 40 adding file changes
41 41 added 2 changesets with 1 changes to 1 files
42 42 (run 'hg update' to get a working copy)
43 43
44 44 Update working directory to "tip", which requires largefile("large"),
45 45 but there is no cache file for it. So, hg must treat it as
46 46 "missing"(!) file.
47 47
48 48 $ hg update -r0
49 49 getting changed largefiles
50 50 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
51 51 0 largefiles updated, 0 removed
52 52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 53 $ hg status
54 54 ! large
55 55
56 56 Update working directory to null: this cleanup .hg/largefiles/dirstate
57 57
58 58 $ hg update null
59 59 getting changed largefiles
60 60 0 largefiles updated, 0 removed
61 61 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
62 62
63 63 Update working directory to tip, again.
64 64
65 65 $ hg update -r0
66 66 getting changed largefiles
67 67 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
68 68 0 largefiles updated, 0 removed
69 69 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
70 70 $ hg status
71 71 ! large
72 72 $ cd ..
73 73
74 74 Verify that largefiles from pulled branchheads are fetched, also to an empty repo
75 75
76 76 $ hg init mirror2
77 77 $ hg -R mirror2 pull src -r0
78 78 pulling from src
79 79 adding changesets
80 80 adding manifests
81 81 adding file changes
82 82 added 1 changesets with 1 changes to 1 files
83 83 (run 'hg update' to get a working copy)
84 84
85 85 #if unix-permissions
86 86
87 87 Portable way to print file permissions:
88 88
89 89 $ cat > ls-l.py <<EOF
90 90 > #!/usr/bin/env python
91 91 > import sys, os
92 92 > path = sys.argv[1]
93 93 > print '%03o' % (os.lstat(path).st_mode & 0777)
94 94 > EOF
95 95 $ chmod +x ls-l.py
96 96
97 97 Test that files in .hg/largefiles inherit mode from .hg/store, not
98 98 from file in working copy:
99 99
100 100 $ cd src
101 101 $ chmod 750 .hg/store
102 102 $ chmod 660 large
103 103 $ echo change >> large
104 104 $ hg commit -m change
105 105 created new head
106 106 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
107 107 640
108 108
109 109 Test permission of files in .hg/largefiles created by update:
110 110
111 111 $ cd ../mirror
112 112 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
113 113 $ chmod 750 .hg/store
114 114 $ hg pull ../src --update -q
115 115 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
116 116 640
117 117
118 118 Test permission of files created by push:
119 119
120 120 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
121 121 > --config "web.allow_push=*" --config web.push_ssl=no
122 122 $ cat hg.pid >> $DAEMON_PIDS
123 123
124 124 $ echo change >> large
125 125 $ hg commit -m change
126 126
127 127 $ rm -r "$USERCACHE"
128 128
129 129 $ hg push -q http://localhost:$HGPORT/
130 130
131 131 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
132 132 640
133 133
134 134 $ cd ..
135 135
136 136 #endif
137 137
138 138 Test issue 4053 (remove --after on a deleted, uncommitted file shouldn't say
139 139 it is missing, but a remove on a nonexistent unknown file still should. Same
140 140 for a forget.)
141 141
142 142 $ cd src
143 143 $ touch x
144 144 $ hg add x
145 145 $ mv x y
146 146 $ hg remove -A x y ENOENT
147 147 ENOENT: * (glob)
148 148 not removing y: file is untracked
149 149 [1]
150 150 $ hg add y
151 151 $ mv y z
152 152 $ hg forget y z ENOENT
153 153 ENOENT: * (glob)
154 154 not removing z: file is already untracked
155 155 [1]
156 156
157 157 Largefiles are accessible from the share's store
158 158 $ cd ..
159 159 $ hg share -q src share_dst --config extensions.share=
160 160 $ hg -R share_dst update -r0
161 161 getting changed largefiles
162 162 1 largefiles updated, 0 removed
163 163 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
164 164
165 165 $ echo modified > share_dst/large
166 166 $ hg -R share_dst ci -m modified
167 167 created new head
168 168
169 169 Only dirstate is in the local store for the share, and the largefile is in the
170 170 share source's local store. Avoid the extra largefiles added in the unix
171 171 conditional above.
172 172 $ hash=`hg -R share_dst cat share_dst/.hglf/large`
173 173 $ echo $hash
174 174 e2fb5f2139d086ded2cb600d5a91a196e76bf020
175 175
176 176 $ find share_dst/.hg/largefiles/* | sort
177 177 share_dst/.hg/largefiles/dirstate
178 178
179 179 $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
180 180 src/.hg/largefiles/dirstate
181 181 src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
182 182
183 183 Inject corruption into the largefiles store and see how update handles that:
184 184
185 185 $ cd src
186 186 $ hg up -qC
187 187 $ cat large
188 188 modified
189 189 $ rm large
190 190 $ cat .hglf/large
191 191 e2fb5f2139d086ded2cb600d5a91a196e76bf020
192 192 $ mv .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 ..
193 193 $ echo corruption > .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
194 (the following update will put the corrupted file into the working directory
195 where it will show up as a change)
196 194 $ hg up -C
197 195 getting changed largefiles
198 1 largefiles updated, 0 removed
196 large: data corruption in $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 with hash 6a7bb2556144babe3899b25e5428123735bb1e27
197 0 largefiles updated, 0 removed
199 198 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
200 199 $ hg st
201 M large
200 ! large
202 201 ? z
203 202 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
General Comments 0
You need to be logged in to leave comments. Login now