##// END OF EJS Templates
largefiles: prevent committing a missing largefile...
Matt Harbison -
r27947:571ba161 stable
parent child Browse files
Show More
@@ -1,637 +1,639 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import platform
13 13 import stat
14 14 import copy
15 15
16 16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 17 from mercurial.i18n import _
18 18 from mercurial import node, error
19 19
20 20 shortname = '.hglf'
21 21 shortnameslash = shortname + '/'
22 22 longname = 'largefiles'
23 23
24 24
25 25 # -- Private worker functions ------------------------------------------
26 26
def getminsize(ui, assumelfiles, opt, default=10):
    '''Resolve the minimum size (in MB) above which files are treated as
    largefiles.

    ``opt`` (a command-line value) wins; otherwise, when ``assumelfiles``
    is set, fall back to the ``largefiles.minsize`` config (or ``default``).
    Aborts if the value is not numeric or cannot be determined at all.'''
    size = opt
    if assumelfiles and not size:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % size)
    if size is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return size
40 40
def link(src, dest):
    '''Hardlink ``src`` to ``dest``; if hardlinking fails, fall back to an
    atomic copy that preserves the source's permission bits.'''
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        # (fix: close the source file deterministically; the original
        # leaked the file object opened for filechunkiter)
        with open(src, 'rb') as srcf:
            dst = util.atomictempfile(dest)
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
            dst.close()
        os.chmod(dest, os.stat(src).st_mode)
52 52
def usercachepath(ui, hash):
    '''Return the path of the file with the given hash in the per-user
    largefiles cache, or None if no cache location can be determined.

    The base directory comes from the ``largefiles.usercache`` config if
    set; otherwise a platform-specific default is used.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            # prefer the local (non-roaming) application data directory
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            if appdata:
                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, 'Library', 'Caches',
                                    longname, hash)
        elif os.name == 'posix':
            # honor the XDG base-directory convention, falling back
            # to ~/.cache when XDG_CACHE_HOME is unset
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                home = os.getenv('HOME')
                if home:
                    path = os.path.join(home, '.cache', longname, hash)
        else:
            raise error.Abort(_('unknown operating system: %s\n') % os.name)
    return path
78 78
def inusercache(ui, hash):
    '''Report whether the largefile with this hash exists in the user
    cache (truthy/falsy result).'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
82 82
def findfile(repo, hash):
    '''Locate the largefile with the given hash, linking it from the user
    cache into the store if needed.  Return its store path, or None when
    it is available in neither place.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
94 94
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass used to track largefiles.

    Every path is normalized with unixpath() before delegating to the
    base class, because the dirstate stores slash-separated paths.'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never considered ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        # (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
117 117
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    When ``create`` is true and no dirstate file exists yet, it is
    seeded from the standins currently tracked by the main dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        # scan tracked standins only (no unknown/ignored files)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if len(standins) > 0:
            util.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
142 142
def lfdirstatestatus(lfdirstate, repo):
    '''Compute the status of largefiles against the working parent.

    Files the dirstate reports as "unsure" are re-hashed to decide
    modified vs. clean; files found clean are marked normal in the
    lfdirstate as a side effect.  Returns the (updated) status object.'''
    wctx = repo['.']
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            # standin not present in the working parent
            fctx = None
        # compare recorded hash against the working-copy content's hash
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
159 159
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is None and repo.dirstate[f] == '?':
            continue
        lfiles.append(splitstandin(f))
    return lfiles
171 171
def instore(repo, hash, forcelocal=False):
    '''Report whether the largefile with this hash exists in the local
    store (see storepath for the forcelocal semantics).'''
    path = storepath(repo, hash, forcelocal)
    return os.path.exists(path)
174 174
def storepath(repo, hash, forcelocal=False):
    '''Return the store filename for the given hash.  For shared repos
    the store lives in the share source, unless ``forcelocal`` is set.'''
    if forcelocal or not repo.shared():
        return repo.join(longname, hash)
    return repo.vfs.reljoin(repo.sharedpath, longname, hash)
179 179
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        # Bug fix: this branch previously returned a bare path, violating
        # the documented (path, exists) contract that callers such as
        # findfile() unpack.
        return (storepath(repo, hash, True), True)

    return (path, False)
196 196
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    dest = repo.wjoin(filename)
    with open(path, 'rb') as srcfd:
        with open(dest, 'wb') as destfd:
            # hash while copying so corruption is detected in one pass
            gothash = copyandhash(srcfd, destfd)
    if gothash != hash:
        # a corrupted cache entry must not be left in the working copy
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        util.unlink(dest)
        return False
    return True
219 219
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing in for ``file`` at ``rev`` into the
    local store, unless it is already there.  Warns when the working
    copy does not hold the content.  (``uploaded`` is unused here; kept
    for interface compatibility.)'''
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    absfile = repo.wjoin(file)
    if not os.path.exists(absfile):
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))
        return
    copytostoreabsolute(repo, absfile, hash)
230 230
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for fn in ctx.files():
        if not isstandin(fn):
            continue
        if fn not in ctx.manifest():
            continue
        copytostore(repo, ctx.node(), splitstandin(fn))
239 239
240 240
def copytostoreabsolute(repo, file, hash):
    '''Install the content of the absolute path ``file`` into the local
    store under ``hash`` (via hardlink from the user cache when
    possible), then mirror it into the user cache.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        # fix: close the source file deterministically; the original
        # leaked the file object opened for filechunkiter
        with open(file, 'rb') as srcf:
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
252 252
def linktousercache(repo, hash):
    '''Mirror the store copy of ``hash`` into the user cache, when a
    user cache location is configured/available.'''
    dest = usercachepath(repo.ui, hash)
    if dest:
        link(storepath(repo, hash), dest)
257 257
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory.

    When ``rmatcher`` is None or matches everything, the result matches
    the whole standin directory instead.'''
    standindir = repo.wjoin(shortname)

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # rebase the user's patterns into the standin directory
        pats = [os.path.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [standindir]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [standindir], badfn=badfn)
    return match
276 276
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    # note: this local name shadows the module-level isstandin()
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        # f must both be a standin and have its largefile accepted
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
288 288
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    converted = util.pconvert(filename)
    return shortnameslash + converted
300 300
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    # a standin always lives under '.hglf/'
    return filename.startswith(shortnameslash)
305 305
def splitstandin(filename):
    '''Return the largefile path for a standin path, or None when the
    path is not a standin.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
315 315
def updatestandin(repo, standin):
    '''Re-calculate the hash of the largefile for ``standin`` and write it
    into the standin file; abort if the largefile is missing from the
    working copy (a missing file must not be silently committed).'''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % splitstandin(standin))
322 324
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
327 329
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
331 333
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = util.sha1('')
    for block in instream:
        hasher.update(block)
        outfile.write(block)
    return hasher.hexdigest()
340 342
def hashrepofile(repo, file):
    '''Return the SHA-1 hex digest of the given working-copy file
    ('' when the file does not exist; see hashfile).'''
    return hashfile(repo.wjoin(file))
343 345
def hashfile(file):
    '''Return the SHA-1 hex digest of the contents of ``file``, or ''
    when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    # 'with' guarantees the descriptor is closed even if reading raises;
    # the original leaked it on the error path
    with open(file, 'rb') as fd:
        for data in util.filechunkiter(fd, 128 * 1024):
            hasher.update(data)
    return hasher.hexdigest()
353 355
def getexecutable(filename):
    '''Report (truthy/falsy) whether filename carries the execute bit
    for user, group and other.'''
    st_mode = os.stat(filename).st_mode
    return ((st_mode & stat.S_IXUSR) and
            (st_mode & stat.S_IXGRP) and
            (st_mode & stat.S_IXOTH))
359 361
def urljoin(first, second, *arg):
    '''Join two or more URL components, ensuring a single '/' is inserted
    between each adjacent pair (one leading slash of the right-hand part
    is stripped; extra embedded slashes are preserved).'''
    def _join(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    result = _join(first, second)
    for piece in arg:
        result = _join(result, piece)
    return result
372 374
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = util.sha1()
    for block in util.filechunkiter(data):
        hasher.update(block)
    return hasher.hexdigest()
380 382
def httpsendfile(ui, filename):
    '''Return a file-like wrapper for ``filename`` (opened binary) usable
    for HTTP upload, as provided by httpconnection.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
383 385
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normpath collapses redundant separators/'..'; pconvert then forces
    # '/' separators (the form the dirstate stores)
    return util.pconvert(os.path.normpath(path))
387 389
def islfilesrepo(repo):
    '''Report whether the repo actually uses largefiles: either the
    requirement is present with largefile data in the store, or the
    largefiles dirstate is non-empty.'''
    if 'largefiles' in repo.requirements:
        if any(shortnameslash in f[0] for f in repo.store.datafiles()):
            return True

    return any(openlfdirstate(repo.ui, repo, False))
394 396
class storeprotonotcapable(Exception):
    '''Raised when none of the store types in ``storetypes`` is usable
    over the current protocol.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
398 400
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every tracked standin;
    hash is None when the standin cannot be read.'''
    result = []
    matcher = getstandinmatcher(repo)
    for fstandin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(fstandin)
        try:
            expected = readstandin(repo, lfile)
        except IOError:
            expected = None
        result.append((lfile, expected))
    return result
410 412
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Sync the lfdirstate entry for ``lfile`` with the state of its
    standin in the main dirstate.  When ``normallookup`` is true, a
    clean-looking file is marked for re-examination rather than clean.'''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # entry fields: [0]=state char, [3]=mtime
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not os.path.exists(repo.wjoin(lfile))):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
433 435
def markcommitted(orig, ctx, node):
    '''Post-commit wrapper: after delegating to ``orig``, sync the
    largefiles dirstate for every standin committed in ``ctx`` and copy
    the committed largefiles into the store/cache.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            lfile = splitstandin(f)
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)
457 459
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the (de-duplicated) largefile names whose (name, hash)
    entries differ between the two standin-state lists.'''
    changed = set(oldstandins) ^ set(newstandins)
    filelist = []
    for entry in changed:
        name = entry[0]
        if name not in filelist:
            filelist.append(name)
    return filelist
465 467
def getlfilestoupload(repo, missing, addfunc):
    '''Call ``addfunc(standin, hash)`` for every largefile standin touched
    by any changeset in ``missing`` (including merge-only changes).'''
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revision'), total=len(missing))
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]

        # read the changectx with largefiles status processing disabled
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            # for a merge, ctx.files() can omit changes relative to one
            # parent, so compare the manifests directly
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
497 499
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = match_.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                if os.path.exists(
                        repo.wjoin(standin(lfile))):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if os.path.exists(repo.wjoin(lfile)):
                        updatestandin(repo,
                                      standin(lfile))

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    # a file matches if it was requested directly (and is not a plain
    # largefile name) or if it is one of the refreshed standins
    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
600 602
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        # only the first invocation after resuming refreshes standins
        self.resuming = False
        return updatestandinsbymatch(repo, match)
621 623
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE
@@ -1,231 +1,237 b''
1 1 Create user cache directory
2 2
3 3 $ USERCACHE=`pwd`/cache; export USERCACHE
4 4 $ cat <<EOF >> ${HGRCPATH}
5 5 > [extensions]
6 6 > hgext.largefiles=
7 7 > [largefiles]
8 8 > usercache=${USERCACHE}
9 9 > EOF
10 10 $ mkdir -p ${USERCACHE}
11 11
12 12 Create source repo, and commit adding largefile.
13 13
14 14 $ hg init src
15 15 $ cd src
16 16 $ echo large > large
17 17 $ hg add --large large
18 18 $ hg commit -m 'add largefile'
19 19 $ hg rm large
20 20 $ hg commit -m 'branchhead without largefile' large
21 21 $ hg up -qr 0
22 $ rm large
23 $ echo "0000000000000000000000000000000000000000" > .hglf/large
24 $ hg commit -m 'commit missing file with corrupt standin' large
25 abort: large: file not found!
26 [255]
27 $ hg up -Cqr 0
22 28 $ cd ..
23 29
24 30 Discard all cached largefiles in USERCACHE
25 31
26 32 $ rm -rf ${USERCACHE}
27 33
28 34 Create mirror repo, and pull from source without largefile:
29 35 "pull" is used instead of "clone" for suppression of (1) updating to
30 36 tip (= caching largefile from source repo), and (2) recording source
31 37 repo as "default" path in .hg/hgrc.
32 38
33 39 $ hg init mirror
34 40 $ cd mirror
35 41 $ hg pull ../src
36 42 pulling from ../src
37 43 requesting all changes
38 44 adding changesets
39 45 adding manifests
40 46 adding file changes
41 47 added 2 changesets with 1 changes to 1 files
42 48 (run 'hg update' to get a working copy)
43 49
44 50 Update working directory to "tip", which requires largefile("large"),
45 51 but there is no cache file for it. So, hg must treat it as
46 52 "missing"(!) file.
47 53
48 54 $ hg update -r0
49 55 getting changed largefiles
50 56 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
51 57 0 largefiles updated, 0 removed
52 58 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 59 $ hg status
54 60 ! large
55 61
 56 62 Update working directory to null: this cleans up .hg/largefiles/dirstate
57 63
58 64 $ hg update null
59 65 getting changed largefiles
60 66 0 largefiles updated, 0 removed
61 67 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
62 68
63 69 Update working directory to tip, again.
64 70
65 71 $ hg update -r0
66 72 getting changed largefiles
67 73 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
68 74 0 largefiles updated, 0 removed
69 75 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
70 76 $ hg status
71 77 ! large
72 78 $ cd ..
73 79
74 80 Verify that largefiles from pulled branchheads are fetched, also to an empty repo
75 81
76 82 $ hg init mirror2
77 83 $ hg -R mirror2 pull src -r0
78 84 pulling from src
79 85 adding changesets
80 86 adding manifests
81 87 adding file changes
82 88 added 1 changesets with 1 changes to 1 files
83 89 (run 'hg update' to get a working copy)
84 90
85 91 #if unix-permissions
86 92
87 93 Portable way to print file permissions:
88 94
89 95 $ cat > ls-l.py <<EOF
90 96 > #!/usr/bin/env python
91 97 > import sys, os
92 98 > path = sys.argv[1]
93 99 > print '%03o' % (os.lstat(path).st_mode & 0777)
94 100 > EOF
95 101 $ chmod +x ls-l.py
96 102
97 103 Test that files in .hg/largefiles inherit mode from .hg/store, not
98 104 from file in working copy:
99 105
100 106 $ cd src
101 107 $ chmod 750 .hg/store
102 108 $ chmod 660 large
103 109 $ echo change >> large
104 110 $ hg commit -m change
105 111 created new head
106 112 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
107 113 640
108 114
 109 115 Test permission of files in .hg/largefiles created by update:
110 116
111 117 $ cd ../mirror
112 118 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
113 119 $ chmod 750 .hg/store
114 120 $ hg pull ../src --update -q
115 121 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
116 122 640
117 123
118 124 Test permission of files created by push:
119 125
120 126 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
121 127 > --config "web.allow_push=*" --config web.push_ssl=no
122 128 $ cat hg.pid >> $DAEMON_PIDS
123 129
124 130 $ echo change >> large
125 131 $ hg commit -m change
126 132
127 133 $ rm -r "$USERCACHE"
128 134
129 135 $ hg push -q http://localhost:$HGPORT/
130 136
131 137 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
132 138 640
133 139
134 140 $ cd ..
135 141
136 142 #endif
137 143
138 144 Test issue 4053 (remove --after on a deleted, uncommitted file shouldn't say
139 145 it is missing, but a remove on a nonexistent unknown file still should. Same
140 146 for a forget.)
141 147
142 148 $ cd src
143 149 $ touch x
144 150 $ hg add x
145 151 $ mv x y
146 152 $ hg remove -A x y ENOENT
147 153 ENOENT: * (glob)
148 154 not removing y: file is untracked
149 155 [1]
150 156 $ hg add y
151 157 $ mv y z
152 158 $ hg forget y z ENOENT
153 159 ENOENT: * (glob)
154 160 not removing z: file is already untracked
155 161 [1]
156 162
157 163 Largefiles are accessible from the share's store
158 164 $ cd ..
159 165 $ hg share -q src share_dst --config extensions.share=
160 166 $ hg -R share_dst update -r0
161 167 getting changed largefiles
162 168 1 largefiles updated, 0 removed
163 169 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
164 170
165 171 $ echo modified > share_dst/large
166 172 $ hg -R share_dst ci -m modified
167 173 created new head
168 174
169 175 Only dirstate is in the local store for the share, and the largefile is in the
170 176 share source's local store. Avoid the extra largefiles added in the unix
171 177 conditional above.
172 178 $ hash=`hg -R share_dst cat share_dst/.hglf/large`
173 179 $ echo $hash
174 180 e2fb5f2139d086ded2cb600d5a91a196e76bf020
175 181
176 182 $ find share_dst/.hg/largefiles/* | sort
177 183 share_dst/.hg/largefiles/dirstate
178 184
179 185 $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
180 186 src/.hg/largefiles/dirstate
181 187 src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
182 188
183 189 Inject corruption into the largefiles store and see how update handles that:
184 190
185 191 $ cd src
186 192 $ hg up -qC
187 193 $ cat large
188 194 modified
189 195 $ rm large
190 196 $ cat .hglf/large
191 197 e2fb5f2139d086ded2cb600d5a91a196e76bf020
192 198 $ mv .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 ..
193 199 $ echo corruption > .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
194 200 $ hg up -C
195 201 getting changed largefiles
196 202 large: data corruption in $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 with hash 6a7bb2556144babe3899b25e5428123735bb1e27 (glob)
197 203 0 largefiles updated, 0 removed
198 204 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
199 205 $ hg st
200 206 ! large
201 207 ? z
202 208 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
203 209
204 210 #if serve
205 211
206 212 Test coverage of error handling from putlfile:
207 213
208 214 $ mkdir $TESTTMP/mirrorcache
209 215 $ hg serve -R ../mirror -d -p $HGPORT1 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache
210 216 $ cat hg.pid >> $DAEMON_PIDS
211 217
212 218 $ hg push http://localhost:$HGPORT1 -f --config files.usercache=nocache
213 219 pushing to http://localhost:$HGPORT1/
214 220 searching for changes
215 221 abort: remotestore: could not open file $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020: HTTP Error 403: ssl required
216 222 [255]
217 223
218 224 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
219 225
220 226 Test coverage of 'missing from store':
221 227
222 228 $ hg serve -R ../mirror -d -p $HGPORT2 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache --config "web.allow_push=*" --config web.push_ssl=no
223 229 $ cat hg.pid >> $DAEMON_PIDS
224 230
225 231 $ hg push http://localhost:$HGPORT2 -f --config largefiles.usercache=nocache
226 232 pushing to http://localhost:$HGPORT2/
227 233 searching for changes
228 234 abort: largefile e2fb5f2139d086ded2cb600d5a91a196e76bf020 missing from store (needs to be uploaded)
229 235 [255]
230 236
231 237 #endif
General Comments 0
You need to be logged in to leave comments. Login now