largefile: make sure we hold the lock when updating the second dirstate...
marmoute
r50859:0cf4c1b8 default
@@ -1,797 +1,807
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import contextlib
12 12 import copy
13 13 import os
14 14 import stat
15 15
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from mercurial.pycompat import open
19 19
20 20 from mercurial import (
21 21 dirstate,
22 22 encoding,
23 23 error,
24 24 httpconnection,
25 25 match as matchmod,
26 26 pycompat,
27 27 requirements,
28 28 scmutil,
29 29 sparse,
30 30 util,
31 31 vfs as vfsmod,
32 32 )
33 33 from mercurial.utils import hashutil
34 34 from mercurial.dirstateutils import timestamp
35 35
36 36 shortname = b'.hglf'
37 37 shortnameslash = shortname + b'/'
38 38 longname = b'largefiles'
39 39
40 40 # -- Private worker functions ------------------------------------------
41 41
42 42
43 43 @contextlib.contextmanager
44 44 def lfstatus(repo, value=True):
45 45 oldvalue = getattr(repo, 'lfstatus', False)
46 46 repo.lfstatus = value
47 47 try:
48 48 yield
49 49 finally:
50 50 repo.lfstatus = oldvalue
51 51
52 52
53 53 def getminsize(ui, assumelfiles, opt, default=10):
54 54 lfsize = opt
55 55 if not lfsize and assumelfiles:
56 56 lfsize = ui.config(longname, b'minsize', default=default)
57 57 if lfsize:
58 58 try:
59 59 lfsize = float(lfsize)
60 60 except ValueError:
61 61 raise error.Abort(
62 62 _(b'largefiles: size must be a number (not %s)\n') % lfsize
63 63 )
64 64 if lfsize is None:
65 65 raise error.Abort(_(b'minimum size for largefiles must be specified'))
66 66 return lfsize
67 67
68 68
69 69 def link(src, dest):
70 70 """Try to create hardlink - if that fails, efficiently make a copy."""
71 71 util.makedirs(os.path.dirname(dest))
72 72 try:
73 73 util.oslink(src, dest)
74 74 except OSError:
75 75 # if hardlinks fail, fallback on atomic copy
76 76 with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
77 77 for chunk in util.filechunkiter(srcf):
78 78 dstf.write(chunk)
79 79 os.chmod(dest, os.stat(src).st_mode)
80 80
81 81
82 82 def usercachepath(ui, hash):
83 83 """Return the correct location in the "global" largefiles cache for a file
84 84 with the given hash.
85 85 This cache is used for sharing of largefiles across repositories - both
86 86 to preserve download bandwidth and storage space."""
87 87 return os.path.join(_usercachedir(ui), hash)
88 88
89 89
90 90 def _usercachedir(ui, name=longname):
91 91 '''Return the location of the "global" largefiles cache.'''
92 92 path = ui.configpath(name, b'usercache')
93 93 if path:
94 94 return path
95 95
96 96 hint = None
97 97
98 98 if pycompat.iswindows:
99 99 appdata = encoding.environ.get(
100 100 b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
101 101 )
102 102 if appdata:
103 103 return os.path.join(appdata, name)
104 104
105 105 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
106 106 b"LOCALAPPDATA",
107 107 b"APPDATA",
108 108 name,
109 109 )
110 110 elif pycompat.isdarwin:
111 111 home = encoding.environ.get(b'HOME')
112 112 if home:
113 113 return os.path.join(home, b'Library', b'Caches', name)
114 114
115 115 hint = _(b"define %s in the environment, or set %s.usercache") % (
116 116 b"HOME",
117 117 name,
118 118 )
119 119 elif pycompat.isposix:
120 120 path = encoding.environ.get(b'XDG_CACHE_HOME')
121 121 if path:
122 122 return os.path.join(path, name)
123 123 home = encoding.environ.get(b'HOME')
124 124 if home:
125 125 return os.path.join(home, b'.cache', name)
126 126
127 127 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
128 128 b"XDG_CACHE_HOME",
129 129 b"HOME",
130 130 name,
131 131 )
132 132 else:
133 133 raise error.Abort(
134 134 _(b'unknown operating system: %s\n') % pycompat.osname
135 135 )
136 136
137 137 raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
138 138
139 139
140 140 def inusercache(ui, hash):
141 141 path = usercachepath(ui, hash)
142 142 return os.path.exists(path)
143 143
144 144
145 145 def findfile(repo, hash):
146 146 """Return store path of the largefile with the specified hash.
147 147 As a side effect, the file might be linked from user cache.
148 148 Return None if the file can't be found locally."""
149 149 path, exists = findstorepath(repo, hash)
150 150 if exists:
151 151 repo.ui.note(_(b'found %s in store\n') % hash)
152 152 return path
153 153 elif inusercache(repo.ui, hash):
154 154 repo.ui.note(_(b'found %s in system cache\n') % hash)
155 155 path = storepath(repo, hash)
156 156 link(usercachepath(repo.ui, hash), path)
157 157 return path
158 158 return None
159 159
160 160
161 161 class largefilesdirstate(dirstate.dirstate):
162 162 def __getitem__(self, key):
163 163 return super(largefilesdirstate, self).__getitem__(unixpath(key))
164 164
165 165 def set_tracked(self, f):
166 166 return super(largefilesdirstate, self).set_tracked(unixpath(f))
167 167
168 168 def set_untracked(self, f):
169 169 return super(largefilesdirstate, self).set_untracked(unixpath(f))
170 170
171 171 def normal(self, f, parentfiledata=None):
172 172 # not sure if we should pass the `parentfiledata` down or throw it
173 173 # away. So throwing it away to stay on the safe side.
174 174 return super(largefilesdirstate, self).normal(unixpath(f))
175 175
176 176 def remove(self, f):
177 177 return super(largefilesdirstate, self).remove(unixpath(f))
178 178
179 179 def add(self, f):
180 180 return super(largefilesdirstate, self).add(unixpath(f))
181 181
182 182 def drop(self, f):
183 183 return super(largefilesdirstate, self).drop(unixpath(f))
184 184
185 185 def forget(self, f):
186 186 return super(largefilesdirstate, self).forget(unixpath(f))
187 187
188 188 def normallookup(self, f):
189 189 return super(largefilesdirstate, self).normallookup(unixpath(f))
190 190
191 191 def _ignore(self, f):
192 192 return False
193 193
194 194 def write(self, tr):
195 195 # (1) disable PENDING mode always
196 196 # (lfdirstate isn't yet managed as a part of the transaction)
197 197 # (2) avoid develwarn 'use dirstate.write with ....'
198 198 if tr:
199 199 tr.addbackup(b'largefiles/dirstate', location=b'plain')
200 200 super(largefilesdirstate, self).write(None)
201 201
202 202
203 203 def openlfdirstate(ui, repo, create=True):
204 204 """
205 205 Return a dirstate object that tracks largefiles: i.e. its root is
206 206 the repo root, but it is saved in .hg/largefiles/dirstate.
207 207 """
208 208 vfs = repo.vfs
209 209 lfstoredir = longname
210 210 opener = vfsmod.vfs(vfs.join(lfstoredir))
211 211 use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
212 212 lfdirstate = largefilesdirstate(
213 213 opener,
214 214 ui,
215 215 repo.root,
216 216 repo.dirstate._validate,
217 217 lambda: sparse.matcher(repo),
218 218 repo.nodeconstants,
219 219 use_dirstate_v2,
220 220 )
221 221
222 222 # If the largefiles dirstate does not exist, populate and create
223 223 # it. This ensures that we create it on the first meaningful
224 224 # largefiles operation in a new clone.
225 225 if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
226 try:
227 with repo.wlock(wait=False):
226 228 matcher = getstandinmatcher(repo)
227 229 standins = repo.dirstate.walk(
228 230 matcher, subrepos=[], unknown=False, ignored=False
229 231 )
230 232
231 233 if len(standins) > 0:
232 234 vfs.makedirs(lfstoredir)
233 235
234 236 with lfdirstate.changing_parents(repo):
235 237 for standin in standins:
236 238 lfile = splitstandin(standin)
237 239 lfdirstate.update_file(
238 lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
240 lfile,
241 p1_tracked=True,
242 wc_tracked=True,
243 possibly_dirty=True,
239 244 )
245 except error.LockError:
246 # Assume that whatever was holding the lock was important.
247 # If we were doing something important, we would already have
248 # either the lock or a largefile dirstate.
249 pass
240 250 return lfdirstate
241 251
242 252
243 253 def lfdirstatestatus(lfdirstate, repo):
244 254 pctx = repo[b'.']
245 255 match = matchmod.always()
246 256 unsure, s, mtime_boundary = lfdirstate.status(
247 257 match, subrepos=[], ignored=False, clean=False, unknown=False
248 258 )
249 259 modified, clean = s.modified, s.clean
250 260 wctx = repo[None]
251 261 for lfile in unsure:
252 262 try:
253 263 fctx = pctx[standin(lfile)]
254 264 except LookupError:
255 265 fctx = None
256 266 if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
257 267 modified.append(lfile)
258 268 else:
259 269 clean.append(lfile)
260 270 st = wctx[lfile].lstat()
261 271 mode = st.st_mode
262 272 size = st.st_size
263 273 mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
264 274 if mtime is not None:
265 275 cache_data = (mode, size, mtime)
266 276 lfdirstate.set_clean(lfile, cache_data)
267 277 return s
268 278
269 279
270 280 def listlfiles(repo, rev=None, matcher=None):
271 281 """return a list of largefiles in the working copy or the
272 282 specified changeset"""
273 283
274 284 if matcher is None:
275 285 matcher = getstandinmatcher(repo)
276 286
277 287 # ignore unknown files in working directory
278 288 return [
279 289 splitstandin(f)
280 290 for f in repo[rev].walk(matcher)
281 291 if rev is not None or repo.dirstate.get_entry(f).any_tracked
282 292 ]
283 293
284 294
285 295 def instore(repo, hash, forcelocal=False):
286 296 '''Return true if a largefile with the given hash exists in the store'''
287 297 return os.path.exists(storepath(repo, hash, forcelocal))
288 298
289 299
290 300 def storepath(repo, hash, forcelocal=False):
291 301 """Return the correct location in the repository largefiles store for a
292 302 file with the given hash."""
293 303 if not forcelocal and repo.shared():
294 304 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
295 305 return repo.vfs.join(longname, hash)
296 306
297 307
298 308 def findstorepath(repo, hash):
299 309 """Search through the local store path(s) to find the file for the given
300 310 hash. If the file is not found, its path in the primary store is returned.
301 311 The return value is a tuple of (path, exists(path)).
302 312 """
303 313 # For shared repos, the primary store is in the share source. But for
304 314 # backward compatibility, force a lookup in the local store if it wasn't
305 315 # found in the share source.
306 316 path = storepath(repo, hash, False)
307 317
308 318 if instore(repo, hash):
309 319 return (path, True)
310 320 elif repo.shared() and instore(repo, hash, True):
311 321 return storepath(repo, hash, True), True
312 322
313 323 return (path, False)
314 324
315 325
316 326 def copyfromcache(repo, hash, filename):
317 327 """Copy the specified largefile from the repo or system cache to
318 328 filename in the repository. Return true on success or false if the
319 329 file was not found in either cache (which should not happen:
320 330 this is meant to be called only after ensuring that the needed
321 331 largefile exists in the cache)."""
322 332 wvfs = repo.wvfs
323 333 path = findfile(repo, hash)
324 334 if path is None:
325 335 return False
326 336 wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
327 337 # The write may fail before the file is fully written, but we
328 338 # don't use atomic writes in the working copy.
329 339 with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
330 340 gothash = copyandhash(util.filechunkiter(srcfd), destfd)
331 341 if gothash != hash:
332 342 repo.ui.warn(
333 343 _(b'%s: data corruption in %s with hash %s\n')
334 344 % (filename, path, gothash)
335 345 )
336 346 wvfs.unlink(filename)
337 347 return False
338 348 return True
339 349
340 350
341 351 def copytostore(repo, ctx, file, fstandin):
342 352 wvfs = repo.wvfs
343 353 hash = readasstandin(ctx[fstandin])
344 354 if instore(repo, hash):
345 355 return
346 356 if wvfs.exists(file):
347 357 copytostoreabsolute(repo, wvfs.join(file), hash)
348 358 else:
349 359 repo.ui.warn(
350 360 _(b"%s: largefile %s not available from local store\n")
351 361 % (file, hash)
352 362 )
353 363
354 364
355 365 def copyalltostore(repo, node):
356 366 '''Copy all largefiles in a given revision to the store'''
357 367
358 368 ctx = repo[node]
359 369 for filename in ctx.files():
360 370 realfile = splitstandin(filename)
361 371 if realfile is not None and filename in ctx.manifest():
362 372 copytostore(repo, ctx, realfile, filename)
363 373
364 374
365 375 def copytostoreabsolute(repo, file, hash):
366 376 if inusercache(repo.ui, hash):
367 377 link(usercachepath(repo.ui, hash), storepath(repo, hash))
368 378 else:
369 379 util.makedirs(os.path.dirname(storepath(repo, hash)))
370 380 with open(file, b'rb') as srcf:
371 381 with util.atomictempfile(
372 382 storepath(repo, hash), createmode=repo.store.createmode
373 383 ) as dstf:
374 384 for chunk in util.filechunkiter(srcf):
375 385 dstf.write(chunk)
376 386 linktousercache(repo, hash)
377 387
378 388
379 389 def linktousercache(repo, hash):
380 390 """Link / copy the largefile with the specified hash from the store
381 391 to the cache."""
382 392 path = usercachepath(repo.ui, hash)
383 393 link(storepath(repo, hash), path)
384 394
385 395
386 396 def getstandinmatcher(repo, rmatcher=None):
387 397 '''Return a match object that applies rmatcher to the standin directory'''
388 398 wvfs = repo.wvfs
389 399 standindir = shortname
390 400
391 401 # no warnings about missing files or directories
392 402 badfn = lambda f, msg: None
393 403
394 404 if rmatcher and not rmatcher.always():
395 405 pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
396 406 if not pats:
397 407 pats = [wvfs.join(standindir)]
398 408 match = scmutil.match(repo[None], pats, badfn=badfn)
399 409 else:
400 410 # no patterns: relative to repo root
401 411 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
402 412 return match
403 413
404 414
405 415 def composestandinmatcher(repo, rmatcher):
406 416 """Return a matcher that accepts standins corresponding to the
407 417 files accepted by rmatcher. Pass the list of files in the matcher
408 418 as the paths specified by the user."""
409 419 smatcher = getstandinmatcher(repo, rmatcher)
410 420 isstandin = smatcher.matchfn
411 421
412 422 def composedmatchfn(f):
413 423 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
414 424
415 425 smatcher.matchfn = composedmatchfn
416 426
417 427 return smatcher
418 428
419 429
420 430 def standin(filename):
421 431 """Return the repo-relative path to the standin for the specified big
422 432 file."""
423 433 # Notes:
424 434 # 1) Some callers want an absolute path, but for instance addlargefiles
425 435 # needs it repo-relative so it can be passed to repo[None].add(). So
426 436 # leave it up to the caller to use repo.wjoin() to get an absolute path.
427 437 # 2) Join with '/' because that's what dirstate always uses, even on
428 438 # Windows. Change existing separator to '/' first in case we are
429 439 # passed filenames from an external source (like the command line).
430 440 return shortnameslash + util.pconvert(filename)
431 441
432 442
433 443 def isstandin(filename):
434 444 """Return true if filename is a big file standin. filename must be
435 445 in Mercurial's internal form (slash-separated)."""
436 446 return filename.startswith(shortnameslash)
437 447
438 448
439 449 def splitstandin(filename):
440 450 # Split on / because that's what dirstate always uses, even on Windows.
441 451 # Change local separator to / first just in case we are passed filenames
442 452 # from an external source (like the command line).
443 453 bits = util.pconvert(filename).split(b'/', 1)
444 454 if len(bits) == 2 and bits[0] == shortname:
445 455 return bits[1]
446 456 else:
447 457 return None
448 458
449 459
450 460 def updatestandin(repo, lfile, standin):
451 461 """Re-calculate hash value of lfile and write it into standin
452 462
453 463 This assumes that "lfutil.standin(lfile) == standin", for efficiency.
454 464 """
455 465 file = repo.wjoin(lfile)
456 466 if repo.wvfs.exists(lfile):
457 467 hash = hashfile(file)
458 468 executable = getexecutable(file)
459 469 writestandin(repo, standin, hash, executable)
460 470 else:
461 471 raise error.Abort(_(b'%s: file not found!') % lfile)
462 472
463 473
464 474 def readasstandin(fctx):
465 475 """read hex hash from given filectx of standin file
466 476
467 477 This encapsulates how "standin" data is stored in the storage layer."""
468 478 return fctx.data().strip()
469 479
470 480
471 481 def writestandin(repo, standin, hash, executable):
472 482 '''write hash to <repo.root>/<standin>'''
473 483 repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')
474 484
475 485
476 486 def copyandhash(instream, outfile):
477 487 """Read bytes from instream (iterable) and write them to outfile,
478 488 computing the SHA-1 hash of the data along the way. Return the hash."""
479 489 hasher = hashutil.sha1(b'')
480 490 for data in instream:
481 491 hasher.update(data)
482 492 outfile.write(data)
483 493 return hex(hasher.digest())
484 494
485 495
486 496 def hashfile(file):
487 497 if not os.path.exists(file):
488 498 return b''
489 499 with open(file, b'rb') as fd:
490 500 return hexsha1(fd)
491 501
492 502
493 503 def getexecutable(filename):
494 504 mode = os.stat(filename).st_mode
495 505 return (
496 506 (mode & stat.S_IXUSR)
497 507 and (mode & stat.S_IXGRP)
498 508 and (mode & stat.S_IXOTH)
499 509 )
500 510
501 511
502 512 def urljoin(first, second, *arg):
503 513 def join(left, right):
504 514 if not left.endswith(b'/'):
505 515 left += b'/'
506 516 if right.startswith(b'/'):
507 517 right = right[1:]
508 518 return left + right
509 519
510 520 url = join(first, second)
511 521 for a in arg:
512 522 url = join(url, a)
513 523 return url
514 524
515 525
516 526 def hexsha1(fileobj):
517 527 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
518 528 object"""
519 529 h = hashutil.sha1()
520 530 for chunk in util.filechunkiter(fileobj):
521 531 h.update(chunk)
522 532 return hex(h.digest())
523 533
524 534
525 535 def httpsendfile(ui, filename):
526 536 return httpconnection.httpsendfile(ui, filename, b'rb')
527 537
528 538
529 539 def unixpath(path):
530 540 '''Return a version of path normalized for use with the lfdirstate.'''
531 541 return util.pconvert(os.path.normpath(path))
532 542
533 543
534 544 def islfilesrepo(repo):
535 545 '''Return true if the repo is a largefile repo.'''
536 546 if b'largefiles' in repo.requirements and any(
537 547 shortnameslash in f[1] for f in repo.store.datafiles()
538 548 ):
539 549 return True
540 550
541 551 return any(openlfdirstate(repo.ui, repo, False))
542 552
543 553
544 554 class storeprotonotcapable(Exception):
545 555 def __init__(self, storetypes):
546 556 self.storetypes = storetypes
547 557
548 558
549 559 def getstandinsstate(repo):
550 560 standins = []
551 561 matcher = getstandinmatcher(repo)
552 562 wctx = repo[None]
553 563 for standin in repo.dirstate.walk(
554 564 matcher, subrepos=[], unknown=False, ignored=False
555 565 ):
556 566 lfile = splitstandin(standin)
557 567 try:
558 568 hash = readasstandin(wctx[standin])
559 569 except IOError:
560 570 hash = None
561 571 standins.append((lfile, hash))
562 572 return standins
563 573
564 574
565 575 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
566 576 lfstandin = standin(lfile)
567 577 if lfstandin not in repo.dirstate:
568 578 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
569 579 else:
570 580 entry = repo.dirstate.get_entry(lfstandin)
571 581 lfdirstate.update_file(
572 582 lfile,
573 583 wc_tracked=entry.tracked,
574 584 p1_tracked=entry.p1_tracked,
575 585 p2_info=entry.p2_info,
576 586 possibly_dirty=True,
577 587 )
578 588
579 589
580 590 def markcommitted(orig, ctx, node):
581 591 repo = ctx.repo()
582 592
583 593 lfdirstate = openlfdirstate(repo.ui, repo)
584 594 with lfdirstate.changing_parents(repo):
585 595 orig(node)
586 596
587 597 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
588 598 # because files coming from the 2nd parent are omitted in the latter.
589 599 #
590 600 # The former should be used to get targets of "synclfdirstate",
591 601 # because such files:
592 602 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
593 603 # - have to be marked as "n" after commit, but
594 604 # - aren't listed in "repo[node].files()"
595 605
596 606 for f in ctx.files():
597 607 lfile = splitstandin(f)
598 608 if lfile is not None:
599 609 synclfdirstate(repo, lfdirstate, lfile, False)
600 610 lfdirstate.write(repo.currenttransaction())
601 611
602 612 # As part of committing, copy all of the largefiles into the cache.
603 613 #
604 614 # Using "node" instead of "ctx" implies an additional "repo[node]"
605 615 # lookup in copyalltostore(), but allows omitting a redundant check
606 616 # for files coming from the 2nd parent, which should exist in the
607 617 # store at merge time.
608 618 copyalltostore(repo, node)
609 619
610 620
611 621 def getlfilestoupdate(oldstandins, newstandins):
612 622 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
613 623 filelist = []
614 624 for f in changedstandins:
615 625 if f[0] not in filelist:
616 626 filelist.append(f[0])
617 627 return filelist
618 628
619 629
620 630 def getlfilestoupload(repo, missing, addfunc):
621 631 makeprogress = repo.ui.makeprogress
622 632 with makeprogress(
623 633 _(b'finding outgoing largefiles'),
624 634 unit=_(b'revisions'),
625 635 total=len(missing),
626 636 ) as progress:
627 637 for i, n in enumerate(missing):
628 638 progress.update(i)
629 639 parents = [p for p in repo[n].parents() if p != repo.nullid]
630 640
631 641 with lfstatus(repo, value=False):
632 642 ctx = repo[n]
633 643
634 644 files = set(ctx.files())
635 645 if len(parents) == 2:
636 646 mc = ctx.manifest()
637 647 mp1 = ctx.p1().manifest()
638 648 mp2 = ctx.p2().manifest()
639 649 for f in mp1:
640 650 if f not in mc:
641 651 files.add(f)
642 652 for f in mp2:
643 653 if f not in mc:
644 654 files.add(f)
645 655 for f in mc:
646 656 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
647 657 files.add(f)
648 658 for fn in files:
649 659 if isstandin(fn) and fn in ctx:
650 660 addfunc(fn, readasstandin(ctx[fn]))
651 661
652 662
653 663 def updatestandinsbymatch(repo, match):
654 664 """Update standins in the working directory according to specified match
655 665
656 666 This returns a (possibly modified) ``match`` object to be used for
657 667 subsequent commit process.
658 668 """
659 669
660 670 ui = repo.ui
661 671
662 672 # Case 1: user calls commit with no specific files or
663 673 # include/exclude patterns: refresh and commit all files that
664 674 # are "dirty".
665 675 if match is None or match.always():
666 676 # Spend a bit of time here to get a list of files we know
667 677 # are modified so we can compare only against those.
668 678 # It can cost a lot of time (several seconds)
669 679 # otherwise to update all standins if the largefiles are
670 680 # large.
671 681 lfdirstate = openlfdirstate(ui, repo)
672 682 dirtymatch = matchmod.always()
673 683 unsure, s, mtime_boundary = lfdirstate.status(
674 684 dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
675 685 )
676 686 modifiedfiles = unsure + s.modified + s.added + s.removed
677 687 lfiles = listlfiles(repo)
678 688 # this only loops through largefiles that exist (not
679 689 # removed/renamed)
680 690 for lfile in lfiles:
681 691 if lfile in modifiedfiles:
682 692 fstandin = standin(lfile)
683 693 if repo.wvfs.exists(fstandin):
684 694 # this handles the case where a rebase is being
685 695 # performed and the working copy is not updated
686 696 # yet.
687 697 if repo.wvfs.exists(lfile):
688 698 updatestandin(repo, lfile, fstandin)
689 699
690 700 return match
691 701
692 702 lfiles = listlfiles(repo)
693 703 match._files = repo._subdirlfs(match.files(), lfiles)
694 704
695 705 # Case 2: user calls commit with specified patterns: refresh
696 706 # any matching big files.
697 707 smatcher = composestandinmatcher(repo, match)
698 708 standins = repo.dirstate.walk(
699 709 smatcher, subrepos=[], unknown=False, ignored=False
700 710 )
701 711
702 712 # No matching big files: get out of the way and pass control to
703 713 # the usual commit() method.
704 714 if not standins:
705 715 return match
706 716
707 717 # Refresh all matching big files. It's possible that the
708 718 # commit will end up failing, in which case the big files will
709 719 # stay refreshed. No harm done: the user modified them and
710 720 # asked to commit them, so sooner or later we're going to
711 721 # refresh the standins. Might as well leave them refreshed.
712 722 lfdirstate = openlfdirstate(ui, repo)
713 723 for fstandin in standins:
714 724 lfile = splitstandin(fstandin)
715 725 if lfdirstate.get_entry(lfile).tracked:
716 726 updatestandin(repo, lfile, fstandin)
717 727
718 728 # Cook up a new matcher that only matches regular files or
719 729 # standins corresponding to the big files requested by the
720 730 # user. Have to modify _files to prevent commit() from
721 731 # complaining "not tracked" for big files.
722 732 match = copy.copy(match)
723 733 origmatchfn = match.matchfn
724 734
725 735 # Check both the list of largefiles and the list of
726 736 # standins because if a largefile was removed, it
727 737 # won't be in the list of largefiles at this point
728 738 match._files += sorted(standins)
729 739
730 740 actualfiles = []
731 741 for f in match._files:
732 742 fstandin = standin(f)
733 743
734 744 # For largefiles, only one of the normal and standin should be
735 745 # committed (except if one of them is a remove). In the case of a
736 746 # standin removal, drop the normal file if it is unknown to dirstate.
737 747 # Thus, skip plain largefile names but keep the standin.
738 748 if f in lfiles or fstandin in standins:
739 749 if not repo.dirstate.get_entry(fstandin).removed:
740 750 if not repo.dirstate.get_entry(f).removed:
741 751 continue
742 752 elif not repo.dirstate.get_entry(f).any_tracked:
743 753 continue
744 754
745 755 actualfiles.append(f)
746 756 match._files = actualfiles
747 757
748 758 def matchfn(f):
749 759 if origmatchfn(f):
750 760 return f not in lfiles
751 761 else:
752 762 return f in standins
753 763
754 764 match.matchfn = matchfn
755 765
756 766 return match
757 767
758 768
759 769 class automatedcommithook:
760 770 """Stateful hook to update standins at the first commit after resuming
761 771
762 772 For efficiency, updating standins in the working directory should
763 773 be avoided during automated committing (like rebase, transplant and
764 774 so on), because they should already be updated before committing.
765 775
766 776 But the first commit after resuming automated committing (e.g. ``rebase
767 777 --continue``) should update them, because largefiles may have been
768 778 modified manually.
769 779 """
770 780
771 781 def __init__(self, resuming):
772 782 self.resuming = resuming
773 783
774 784 def __call__(self, repo, match):
775 785 if self.resuming:
776 786 self.resuming = False # avoids updating at subsequent commits
777 787 return updatestandinsbymatch(repo, match)
778 788 else:
779 789 return match
780 790
781 791
782 792 def getstatuswriter(ui, repo, forcibly=None):
783 793 """Return the function to write largefiles-specific status out
784 794
785 795 If ``forcibly`` is ``None``, this returns the last element of
786 796 ``repo._lfstatuswriters`` as "default" writer function.
787 797
788 798 Otherwise, this returns the function to always write out (or
789 799 ignore if ``not forcibly``) status.
790 800 """
791 801 if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
792 802 return repo._lfstatuswriters[-1]
793 803 else:
794 804 if forcibly:
795 805 return ui.status # forcibly WRITE OUT
796 806 else:
797 807 return lambda *msg, **opts: None # forcibly IGNORE