##// END OF EJS Templates
largefiles: allow "lfstatus" context manager to set value to False...
Martin von Zweigbergk -
r43983:a02e4c12 default
parent child Browse files
Show More
@@ -1,764 +1,760 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import contextlib
13 13 import copy
14 14 import hashlib
15 15 import os
16 16 import stat
17 17
18 18 from mercurial.i18n import _
19 19 from mercurial.node import hex
20 20 from mercurial.pycompat import open
21 21
22 22 from mercurial import (
23 23 dirstate,
24 24 encoding,
25 25 error,
26 26 httpconnection,
27 27 match as matchmod,
28 28 node,
29 29 pycompat,
30 30 scmutil,
31 31 sparse,
32 32 util,
33 33 vfs as vfsmod,
34 34 )
35 35
36 36 shortname = b'.hglf'
37 37 shortnameslash = shortname + b'/'
38 38 longname = b'largefiles'
39 39
40 40 # -- Private worker functions ------------------------------------------
41 41
42 42
@contextlib.contextmanager
def lfstatus(repo, value=True):
    """Temporarily set ``repo.lfstatus`` to ``value`` (default True).

    The previous value (False when the attribute was absent) is always
    restored on exit, even when the body raises.
    """
    previous = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = previous
51 51
52 52
def getminsize(ui, assumelfiles, opt, default=10):
    """Return the minimum largefile size (in MB) as a float.

    The explicit command-line value ``opt`` wins; otherwise, when
    ``assumelfiles`` is set, fall back to the ``largefiles.minsize``
    config value (defaulting to ``default``). Aborts when the value is
    not numeric or when no size can be determined at all.
    """
    size = opt
    if not size and assumelfiles:
        size = ui.config(longname, b'minsize', default=default)
    if size:
        try:
            return float(size)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % size
            )
    if size is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return size
67 67
68 68
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlinking failed (cross-device link, unsupported filesystem,
        # ...): fall back to an atomic copy, then mirror the source's
        # permission bits onto the destination
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)
80 80
81 81
def usercachepath(ui, hash):
    """Return the path of the file with the given hash inside the
    "global" (per-user) largefiles cache.

    This cache is shared across repositories, both to preserve download
    bandwidth and storage space.
    """
    return os.path.join(_usercachedir(ui), hash)
88 88
89 89
def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    # explicit configuration always wins
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    env = encoding.environ
    if pycompat.iswindows:
        appdata = env.get(b'LOCALAPPDATA', env.get(b'APPDATA'))
        if appdata:
            return os.path.join(appdata, name)
    elif pycompat.isdarwin:
        home = env.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)
    elif pycompat.isposix:
        # honor the XDG base-dir spec, falling back to ~/.cache
        path = env.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = env.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )
    raise error.Abort(_(b'unknown %s usercache location') % name)
117 117
118 118
def inusercache(ui, hash):
    """Return True if the per-user cache already holds ``hash``."""
    return os.path.exists(usercachepath(ui, hash))
122 122
123 123
def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        # not in the store yet: link/copy it over from the user cache
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
138 138
139 139
class largefilesdirstate(dirstate.dirstate):
    """A dirstate whose keys are normalized through ``unixpath()``.

    Every public accessor converts the incoming filename with
    ``unixpath()`` before delegating to the base class, so callers may
    pass OS-native paths.
    """

    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        # largefiles are never ignored
        return False

    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
170 170
171 171
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if standins:
            vfs.makedirs(lfstoredir)

        for fstandin in standins:
            lfdirstate.normallookup(splitstandin(fstandin))
    return lfdirstate
204 204
205 205
def lfdirstatestatus(lfdirstate, repo):
    """Resolve 'unsure' lfdirstate entries against '.' and return the
    status.

    Each unsure file is compared (by standin hash) with the working
    copy; clean files are marked normal in ``lfdirstate`` as a side
    effect.
    """
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            # standin not present in '.': treat as modified below
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
224 224
225 225
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    ctx = repo[rev]
    return [
        splitstandin(f)
        for f in ctx.walk(matcher)
        if rev is not None or repo.dirstate[f] != b'?'
    ]
239 239
240 240
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))
244 244
245 245
def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.'''
    if not forcelocal and repo.shared():
        # shared repos keep their primary store in the share source
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)
252 252
253 253
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    primary = storepath(repo, hash, False)
    if instore(repo, hash):
        return (primary, True)
    if repo.shared() and instore(repo, hash, True):
        return (storepath(repo, hash, True), True)
    return (primary, False)
270 270
271 271
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash == hash:
        return True
    # content did not hash to what the standin promised: remove it
    repo.ui.warn(
        _(b'%s: data corruption in %s with hash %s\n')
        % (filename, path, gothash)
    )
    wvfs.unlink(filename)
    return False
295 295
296 296
def copytostore(repo, ctx, file, fstandin):
    """Copy largefile ``file`` (whose standin ``fstandin`` exists in
    ``ctx``) from the working directory into the local store, unless it
    is already there."""
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if not wvfs.exists(file):
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )
        return
    copytostoreabsolute(repo, wvfs.join(file), hash)
309 309
310 310
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        # only standins that still exist in the revision's manifest
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)
319 319
320 320
def copytostoreabsolute(repo, file, hash):
    """Copy ``file`` (an absolute path) into the local store under
    ``hash``, reusing the user cache via hardlink when possible, and
    link the result back into the user cache."""
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(
            storepath(repo, hash), createmode=repo.store.createmode
        )
        with open(file, b'rb') as srcf:
            with dst as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
333 333
334 334
def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
340 340
341 341
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # restrict the match to the standin counterparts of rmatcher's files
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
    else:
        # no patterns: match the whole standin directory
        pats = [wvfs.join(standindir)]
    return scmutil.match(repo[None], pats, badfn=badfn)
359 359
360 360
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        # f must be a standin AND its largefile must match rmatcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn
    return smatcher
374 374
375 375
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
387 387
388 388
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)
393 393
394 394
def splitstandin(filename):
    """Return the largefile name for a standin path, or None when
    ``filename`` is not a standin.

    Split on / because that's what dirstate always uses, even on
    Windows; local separators are converted first in case the name came
    from an external source (like the command line).
    """
    parts = util.pconvert(filename).split(b'/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
404 404
405 405
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    if not repo.wvfs.exists(lfile):
        raise error.Abort(_(b'%s: file not found!') % lfile)
    file = repo.wjoin(lfile)
    writestandin(repo, standin, hashfile(file), getexecutable(file))
418 418
419 419
def readasstandin(fctx):
    '''read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer.'''
    return fctx.data().strip()
425 425
426 426
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = b'x' if executable else b''
    repo.wwrite(standin, hash + b'\n', flags)
430 430
431 431
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = hashlib.sha1(b'')
    for chunk in instream:
        hasher.update(chunk)
        outfile.write(chunk)
    return hex(hasher.digest())
440 440
441 441
def hashfile(file):
    """Return the hex SHA-1 of ``file``'s contents, or b'' when the
    file does not exist."""
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)
447 447
448 448
def getexecutable(filename):
    """Return a truthy value iff ``filename`` is executable by user,
    group AND other."""
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )
456 456
457 457
def urljoin(first, second, *arg):
    """Join URL path components with exactly one b'/' between each."""

    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for piece in arg:
        url = join(url, piece)
    return url
470 470
471 471
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashlib.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())
479 479
480 480
def httpsendfile(ui, filename):
    """Return an httpsendfile wrapper opened read-only on ``filename``."""
    return httpconnection.httpsendfile(ui, filename, b'rb')
483 483
484 484
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))
488 488
489 489
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    # fast path: the requirement is set and standins exist in the store
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[0] for f in repo.store.datafiles()
    ):
        return True

    # otherwise, check for any entry in the largefiles dirstate
    return any(openlfdirstate(repo.ui, repo, False))
498 498
499 499
class storeprotonotcapable(Exception):
    """Raised when no store supports any of the required store types."""

    def __init__(self, storetypes):
        # the store types that were requested but unsupported
        self.storetypes = storetypes
503 503
504 504
def getstandinsstate(repo):
    """Return a list of (lfile, hash) pairs for every standin tracked in
    the working directory; hash is None when the standin is unreadable."""
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for fstandin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(fstandin)
        try:
            hash = readasstandin(wctx[fstandin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins
519 519
520 520
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    """Sync the lfdirstate entry for ``lfile`` with its standin's state
    in ``repo.dirstate``.

    ``normallookup`` forces a normallookup (re-check on next status)
    even when the standin looks clean.
    """
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # NOTE: renamed from 'stat' to avoid shadowing the stdlib 'stat'
        # module imported at the top of this file
        entry = repo.dirstate._map[lfstandin]
        state, mtime = entry[0], entry[3]
    else:
        state, mtime = b'?', -1
    if state == b'n':
        if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == b'm':
        lfdirstate.normallookup(lfile)
    elif state == b'r':
        lfdirstate.remove(lfile)
    elif state == b'a':
        lfdirstate.add(lfile)
    elif state == b'?':
        lfdirstate.drop(lfile)
542 542
543 543
def markcommitted(orig, ctx, node):
    """Wrapper for committablectx.markcommitted: sync lfdirstate and
    populate the store after the original hook runs."""
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files comming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)
572 572
573 573
def getlfilestoupdate(oldstandins, newstandins):
    """Return the de-duplicated list of largefile names whose (name,
    hash) entry differs between ``oldstandins`` and ``newstandins``."""
    changed = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for name, _hash in changed:
        if name not in filelist:
            filelist.append(name)
    return filelist
581 581
582 582
def getlfilestoupload(repo, missing, addfunc):
    """Call ``addfunc(standinname, hash)`` for every standin touched by
    the revisions in ``missing``, reporting progress per revision."""
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != node.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                # merge commit: ctx.files() omits files coming unchanged
                # from either parent, so compare the manifests directly
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))
619 615
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files.  It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed.  No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins.  Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != b'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user.  Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove).  In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != b'r':
                if repo.dirstate[f] != b'r':
                    continue
            elif repo.dirstate[f] == b'?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            # matched files must not be largefiles (their standins stand in)
            return f not in lfiles
        else:
            # unmatched files are accepted only when they are standins
            return f in standins

    match.matchfn = matchfn

    return match
724 720
725 721
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''

    def __init__(self, resuming):
        # True only until the first commit after resuming
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        self.resuming = False  # avoids updating at subsequent commits
        return updatestandinsbymatch(repo, match)
747 743
748 744
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE
General Comments 0
You need to be logged in to leave comments. Login now