largefiles: remove the `changing_parents` context in `openlfdirstate`...
marmoute
r50915:e2f3cba6 default
@@ -1,811 +1,810 @@
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import contextlib
12 12 import copy
13 13 import os
14 14 import stat
15 15
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from mercurial.pycompat import open
19 19
20 20 from mercurial import (
21 21 dirstate,
22 22 encoding,
23 23 error,
24 24 httpconnection,
25 25 match as matchmod,
26 26 pycompat,
27 27 requirements,
28 28 scmutil,
29 29 sparse,
30 30 util,
31 31 vfs as vfsmod,
32 32 )
33 33 from mercurial.utils import hashutil
34 34 from mercurial.dirstateutils import timestamp
35 35
36 36 shortname = b'.hglf'
37 37 shortnameslash = shortname + b'/'
38 38 longname = b'largefiles'
39 39
40 40 # -- Private worker functions ------------------------------------------
41 41
42 42
43 43 @contextlib.contextmanager
44 44 def lfstatus(repo, value=True):
45 45 oldvalue = getattr(repo, 'lfstatus', False)
46 46 repo.lfstatus = value
47 47 try:
48 48 yield
49 49 finally:
50 50 repo.lfstatus = oldvalue
51 51
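# Hedged usage sketch (not part of the original file): callers wrap
# status-related work in lfstatus() so largefiles are reported instead of
# their standins; `repo` is assumed to be an existing repository object.
#
#     with lfstatus(repo):
#         changes = repo.status()  # sees largefiles while the flag is set
#     # repo.lfstatus is restored on exit, even if an exception was raised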
52 52
53 53 def getminsize(ui, assumelfiles, opt, default=10):
54 54 lfsize = opt
55 55 if not lfsize and assumelfiles:
56 56 lfsize = ui.config(longname, b'minsize', default=default)
57 57 if lfsize:
58 58 try:
59 59 lfsize = float(lfsize)
60 60 except ValueError:
61 61 raise error.Abort(
62 62 _(b'largefiles: size must be a number (not %s)\n') % lfsize
63 63 )
64 64 if lfsize is None:
65 65 raise error.Abort(_(b'minimum size for largefiles must be specified'))
66 66 return lfsize
67 67
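# Illustrative call (values made up): with an empty command-line option and
# assumelfiles=True, the size falls back to the [largefiles] minsize config
# and is returned as a float:
#
#     minsize = getminsize(ui, True, b'', default=10)  # -> 10.0 by default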
68 68
69 69 def link(src, dest):
70 70 """Try to create hardlink - if that fails, efficiently make a copy."""
71 71 util.makedirs(os.path.dirname(dest))
72 72 try:
73 73 util.oslink(src, dest)
74 74 except OSError:
75 75 # if hardlinks fail, fallback on atomic copy
76 76 with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
77 77 for chunk in util.filechunkiter(srcf):
78 78 dstf.write(chunk)
79 79 os.chmod(dest, os.stat(src).st_mode)
80 80
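# Sketch (a real call pattern from findfile() below): this is the primitive
# that connects the per-repo store and the user cache; a hardlink when the
# filesystem allows it, otherwise an atomic copy preserving the file mode:
#
#     link(usercachepath(ui, hash), storepath(repo, hash))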
81 81
82 82 def usercachepath(ui, hash):
83 83 """Return the correct location in the "global" largefiles cache for a file
84 84 with the given hash.
85 85 This cache is used for sharing of largefiles across repositories - both
86 86 to preserve download bandwidth and storage space."""
87 87 return os.path.join(_usercachedir(ui), hash)
88 88
89 89
90 90 def _usercachedir(ui, name=longname):
91 91 '''Return the location of the "global" largefiles cache.'''
92 92 path = ui.configpath(name, b'usercache')
93 93 if path:
94 94 return path
95 95
96 96 hint = None
97 97
98 98 if pycompat.iswindows:
99 99 appdata = encoding.environ.get(
100 100 b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
101 101 )
102 102 if appdata:
103 103 return os.path.join(appdata, name)
104 104
105 105 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
106 106 b"LOCALAPPDATA",
107 107 b"APPDATA",
108 108 name,
109 109 )
110 110 elif pycompat.isdarwin:
111 111 home = encoding.environ.get(b'HOME')
112 112 if home:
113 113 return os.path.join(home, b'Library', b'Caches', name)
114 114
115 115 hint = _(b"define %s in the environment, or set %s.usercache") % (
116 116 b"HOME",
117 117 name,
118 118 )
119 119 elif pycompat.isposix:
120 120 path = encoding.environ.get(b'XDG_CACHE_HOME')
121 121 if path:
122 122 return os.path.join(path, name)
123 123 home = encoding.environ.get(b'HOME')
124 124 if home:
125 125 return os.path.join(home, b'.cache', name)
126 126
127 127 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
128 128 b"XDG_CACHE_HOME",
129 129 b"HOME",
130 130 name,
131 131 )
132 132 else:
133 133 raise error.Abort(
134 134 _(b'unknown operating system: %s\n') % pycompat.osname
135 135 )
136 136
137 137 raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
138 138
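# Resolution order sketch (illustrative value): an explicit configuration
# always wins, then the platform default, otherwise the Abort above fires
# with a platform-specific hint:
#
#     [largefiles]
#     usercache = /srv/lfcache   # -> /srv/lfcache/<hash> via usercachepath()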
139 139
140 140 def inusercache(ui, hash):
141 141 path = usercachepath(ui, hash)
142 142 return os.path.exists(path)
143 143
144 144
145 145 def findfile(repo, hash):
146 146 """Return store path of the largefile with the specified hash.
147 147 As a side effect, the file might be linked from user cache.
148 148 Return None if the file can't be found locally."""
149 149 path, exists = findstorepath(repo, hash)
150 150 if exists:
151 151 repo.ui.note(_(b'found %s in store\n') % hash)
152 152 return path
153 153 elif inusercache(repo.ui, hash):
154 154 repo.ui.note(_(b'found %s in system cache\n') % hash)
155 155 path = storepath(repo, hash)
156 156 link(usercachepath(repo.ui, hash), path)
157 157 return path
158 158 return None
159 159
160 160
161 161 class largefilesdirstate(dirstate.dirstate):
162 162 def __getitem__(self, key):
163 163 return super(largefilesdirstate, self).__getitem__(unixpath(key))
164 164
165 165 def set_tracked(self, f):
166 166 return super(largefilesdirstate, self).set_tracked(unixpath(f))
167 167
168 168 def set_untracked(self, f):
169 169 return super(largefilesdirstate, self).set_untracked(unixpath(f))
170 170
171 171 def normal(self, f, parentfiledata=None):
172 172 # not sure if we should pass the `parentfiledata` down or throw it
173 173 # away. So throwing it away to stay on the safe side.
174 174 return super(largefilesdirstate, self).normal(unixpath(f))
175 175
176 176 def remove(self, f):
177 177 return super(largefilesdirstate, self).remove(unixpath(f))
178 178
179 179 def add(self, f):
180 180 return super(largefilesdirstate, self).add(unixpath(f))
181 181
182 182 def drop(self, f):
183 183 return super(largefilesdirstate, self).drop(unixpath(f))
184 184
185 185 def forget(self, f):
186 186 return super(largefilesdirstate, self).forget(unixpath(f))
187 187
188 188 def normallookup(self, f):
189 189 return super(largefilesdirstate, self).normallookup(unixpath(f))
190 190
191 191 def _ignore(self, f):
192 192 return False
193 193
194 194 def write(self, tr):
195 195 # (1) disable PENDING mode always
196 196 # (lfdirstate isn't yet managed as a part of the transaction)
197 197 # (2) avoid develwarn 'use dirstate.write with ....'
198 198 if tr:
199 199 tr.addbackup(b'largefiles/dirstate', location=b'plain')
200 200 super(largefilesdirstate, self).write(None)
201 201
202 202
203 203 def openlfdirstate(ui, repo, create=True):
204 204 """
205 205 Return a dirstate object that tracks largefiles: i.e. its root is
206 206 the repo root, but it is saved in .hg/largefiles/dirstate.
207 207 """
208 208 vfs = repo.vfs
209 209 lfstoredir = longname
210 210 opener = vfsmod.vfs(vfs.join(lfstoredir))
211 211 use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
212 212 lfdirstate = largefilesdirstate(
213 213 opener,
214 214 ui,
215 215 repo.root,
216 216 repo.dirstate._validate,
217 217 lambda: sparse.matcher(repo),
218 218 repo.nodeconstants,
219 219 use_dirstate_v2,
220 220 )
221 221
222 222 # If the largefiles dirstate does not exist, populate and create
223 223 # it. This ensures that we create it on the first meaningful
224 224 # largefiles operation in a new clone.
225 225 if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
226 226 try:
227 227 with repo.wlock(wait=False):
228 228 matcher = getstandinmatcher(repo)
229 229 standins = repo.dirstate.walk(
230 230 matcher, subrepos=[], unknown=False, ignored=False
231 231 )
232 232
233 233 if len(standins) > 0:
234 234 vfs.makedirs(lfstoredir)
235 235
236 with lfdirstate.changing_parents(repo):
237 for standin in standins:
238 lfile = splitstandin(standin)
239 lfdirstate.hacky_extension_update_file(
240 lfile,
241 p1_tracked=True,
242 wc_tracked=True,
243 possibly_dirty=True,
244 )
236 for standin in standins:
237 lfile = splitstandin(standin)
238 lfdirstate.hacky_extension_update_file(
239 lfile,
240 p1_tracked=True,
241 wc_tracked=True,
242 possibly_dirty=True,
243 )
245 244 except error.LockError:
246 245 # Assume that whatever was holding the lock was important.
247 246 # If we were doing something important, we would already have
248 247 # either the lock or a largefile dirstate.
249 248 pass
250 249 return lfdirstate
251 250
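# Minimal usage sketch (file name is hypothetical); markcommitted() below
# shows a real caller:
#
#     lfdirstate = openlfdirstate(ui, repo)
#     entry = lfdirstate.get_entry(b'big.bin')
#     lfdirstate.write(repo.currenttransaction())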
252 251
253 252 def lfdirstatestatus(lfdirstate, repo):
254 253 pctx = repo[b'.']
255 254 match = matchmod.always()
256 255 unsure, s, mtime_boundary = lfdirstate.status(
257 256 match, subrepos=[], ignored=False, clean=False, unknown=False
258 257 )
259 258 modified, clean = s.modified, s.clean
260 259 wctx = repo[None]
261 260 for lfile in unsure:
262 261 try:
263 262 fctx = pctx[standin(lfile)]
264 263 except LookupError:
265 264 fctx = None
266 265 if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
267 266 modified.append(lfile)
268 267 else:
269 268 clean.append(lfile)
270 269 st = wctx[lfile].lstat()
271 270 mode = st.st_mode
272 271 size = st.st_size
273 272 mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
274 273 if mtime is not None:
275 274 cache_data = (mode, size, mtime)
276 275 lfdirstate.set_clean(lfile, cache_data)
277 276 return s
278 277
279 278
280 279 def listlfiles(repo, rev=None, matcher=None):
281 280 """return a list of largefiles in the working copy or the
282 281 specified changeset"""
283 282
284 283 if matcher is None:
285 284 matcher = getstandinmatcher(repo)
286 285
287 286 # ignore unknown files in working directory
288 287 return [
289 288 splitstandin(f)
290 289 for f in repo[rev].walk(matcher)
291 290 if rev is not None or repo.dirstate.get_entry(f).any_tracked
292 291 ]
293 292
294 293
295 294 def instore(repo, hash, forcelocal=False):
296 295 '''Return true if a largefile with the given hash exists in the store'''
297 296 return os.path.exists(storepath(repo, hash, forcelocal))
298 297
299 298
300 299 def storepath(repo, hash, forcelocal=False):
301 300 """Return the correct location in the repository largefiles store for a
302 301 file with the given hash."""
303 302 if not forcelocal and repo.shared():
304 303 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
305 304 return repo.vfs.join(longname, hash)
306 305
307 306
308 307 def findstorepath(repo, hash):
309 308 """Search through the local store path(s) to find the file for the given
310 309 hash. If the file is not found, its path in the primary store is returned.
311 310 The return value is a tuple of (path, exists(path)).
312 311 """
313 312 # For shared repos, the primary store is in the share source. But for
314 313 # backward compatibility, force a lookup in the local store if it wasn't
315 314 # found in the share source.
316 315 path = storepath(repo, hash, False)
317 316
318 317 if instore(repo, hash):
319 318 return (path, True)
320 319 elif repo.shared() and instore(repo, hash, True):
321 320 return storepath(repo, hash, True), True
322 321
323 322 return (path, False)
324 323
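# Lookup sketch matching the comment above: the share source is checked
# first, then the local store for backward compatibility; on a miss the
# primary path is still returned so the caller can create the file there:
#
#     path, exists = findstorepath(repo, hash)
#     if not exists:
#         pass  # e.g. fall back to the user cache, as findfile() does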
325 324
326 325 def copyfromcache(repo, hash, filename):
327 326 """Copy the specified largefile from the repo or system cache to
328 327 filename in the repository. Return true on success or false if the
329 328 file was not found in either cache (which should not happen:
330 329 this is meant to be called only after ensuring that the needed
331 330 largefile exists in the cache)."""
332 331 wvfs = repo.wvfs
333 332 path = findfile(repo, hash)
334 333 if path is None:
335 334 return False
336 335 wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
337 336 # The write may fail before the file is fully written, but we
338 337 # don't use atomic writes in the working copy.
339 338 with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
340 339 gothash = copyandhash(util.filechunkiter(srcfd), destfd)
341 340 if gothash != hash:
342 341 repo.ui.warn(
343 342 _(b'%s: data corruption in %s with hash %s\n')
344 343 % (filename, path, gothash)
345 344 )
346 345 wvfs.unlink(filename)
347 346 return False
348 347 return True
349 348
350 349
351 350 def copytostore(repo, ctx, file, fstandin):
352 351 wvfs = repo.wvfs
353 352 hash = readasstandin(ctx[fstandin])
354 353 if instore(repo, hash):
355 354 return
356 355 if wvfs.exists(file):
357 356 copytostoreabsolute(repo, wvfs.join(file), hash)
358 357 else:
359 358 repo.ui.warn(
360 359 _(b"%s: largefile %s not available from local store\n")
361 360 % (file, hash)
362 361 )
363 362
364 363
365 364 def copyalltostore(repo, node):
366 365 '''Copy all largefiles in a given revision to the store'''
367 366
368 367 ctx = repo[node]
369 368 for filename in ctx.files():
370 369 realfile = splitstandin(filename)
371 370 if realfile is not None and filename in ctx.manifest():
372 371 copytostore(repo, ctx, realfile, filename)
373 372
374 373
375 374 def copytostoreabsolute(repo, file, hash):
376 375 if inusercache(repo.ui, hash):
377 376 link(usercachepath(repo.ui, hash), storepath(repo, hash))
378 377 else:
379 378 util.makedirs(os.path.dirname(storepath(repo, hash)))
380 379 with open(file, b'rb') as srcf:
381 380 with util.atomictempfile(
382 381 storepath(repo, hash), createmode=repo.store.createmode
383 382 ) as dstf:
384 383 for chunk in util.filechunkiter(srcf):
385 384 dstf.write(chunk)
386 385 linktousercache(repo, hash)
387 386
388 387
389 388 def linktousercache(repo, hash):
390 389 """Link / copy the largefile with the specified hash from the store
391 390 to the cache."""
392 391 path = usercachepath(repo.ui, hash)
393 392 link(storepath(repo, hash), path)
394 393
395 394
396 395 def getstandinmatcher(repo, rmatcher=None):
397 396 '''Return a match object that applies rmatcher to the standin directory'''
398 397 wvfs = repo.wvfs
399 398 standindir = shortname
400 399
401 400 # no warnings about missing files or directories
402 401 badfn = lambda f, msg: None
403 402
404 403 if rmatcher and not rmatcher.always():
405 404 pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
406 405 if not pats:
407 406 pats = [wvfs.join(standindir)]
408 407 match = scmutil.match(repo[None], pats, badfn=badfn)
409 408 else:
410 409 # no patterns: relative to repo root
411 410 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
412 411 return match
413 412
414 413
415 414 def composestandinmatcher(repo, rmatcher):
416 415 """Return a matcher that accepts standins corresponding to the
417 416 files accepted by rmatcher. Pass the list of files in the matcher
418 417 as the paths specified by the user."""
419 418 smatcher = getstandinmatcher(repo, rmatcher)
420 419 isstandin = smatcher.matchfn
421 420
422 421 def composedmatchfn(f):
423 422 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
424 423
425 424 smatcher.matchfn = composedmatchfn
426 425
427 426 return smatcher
428 427
429 428
430 429 def standin(filename):
431 430 """Return the repo-relative path to the standin for the specified big
432 431 file."""
433 432 # Notes:
434 433 # 1) Some callers want an absolute path, but for instance addlargefiles
435 434 # needs it repo-relative so it can be passed to repo[None].add(). So
436 435 # leave it up to the caller to use repo.wjoin() to get an absolute path.
437 436 # 2) Join with '/' because that's what dirstate always uses, even on
438 437 # Windows. Change existing separator to '/' first in case we are
439 438 # passed filenames from an external source (like the command line).
440 439 return shortnameslash + util.pconvert(filename)
441 440
442 441
443 442 def isstandin(filename):
444 443 """Return true if filename is a big file standin. filename must be
445 444 in Mercurial's internal form (slash-separated)."""
446 445 return filename.startswith(shortnameslash)
447 446
448 447
449 448 def splitstandin(filename):
450 449 # Split on / because that's what dirstate always uses, even on Windows.
451 450 # Change local separator to / first just in case we are passed filenames
452 451 # from an external source (like the command line).
453 452 bits = util.pconvert(filename).split(b'/', 1)
454 453 if len(bits) == 2 and bits[0] == shortname:
455 454 return bits[1]
456 455 else:
457 456 return None
458 457
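# The three helpers above form a round trip; a short sketch with a made-up
# path (b'.hglf' is the `shortname` defined at the top of this module):
#
#     standin(b'data/big.bin')             # -> b'.hglf/data/big.bin'
#     isstandin(b'.hglf/data/big.bin')     # -> True
#     splitstandin(b'.hglf/data/big.bin')  # -> b'data/big.bin'
#     splitstandin(b'data/big.bin')        # -> None (not under .hglf/)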
459 458
460 459 def updatestandin(repo, lfile, standin):
461 460 """Re-calculate hash value of lfile and write it into standin
462 461
463 462 This assumes that "lfutil.standin(lfile) == standin", for efficiency.
464 463 """
465 464 file = repo.wjoin(lfile)
466 465 if repo.wvfs.exists(lfile):
467 466 hash = hashfile(file)
468 467 executable = getexecutable(file)
469 468 writestandin(repo, standin, hash, executable)
470 469 else:
471 470 raise error.Abort(_(b'%s: file not found!') % lfile)
472 471
473 472
474 473 def readasstandin(fctx):
475 474 """read hex hash from given filectx of standin file
476 475
477 476 This encapsulates how "standin" data is stored into storage layer."""
478 477 return fctx.data().strip()
479 478
480 479
481 480 def writestandin(repo, standin, hash, executable):
482 481 '''write hash to <repo.root>/<standin>'''
483 482 repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')
484 483
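# Round-trip sketch (hash value is fabricated): a standin is just the hex
# hash plus a trailing newline, which readasstandin() strips back off:
#
#     writestandin(repo, standin(b'big.bin'), b'0' * 40, False)
#     # .hglf/big.bin in the working copy now holds b'0' * 40 + b'\n'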
485 484
486 485 def copyandhash(instream, outfile):
487 486 """Read bytes from instream (iterable) and write them to outfile,
488 487 computing the SHA-1 hash of the data along the way. Return the hash."""
489 488 hasher = hashutil.sha1(b'')
490 489 for data in instream:
491 490 hasher.update(data)
492 491 outfile.write(data)
493 492 return hex(hasher.digest())
494 493
495 494
496 495 def hashfile(file):
497 496 if not os.path.exists(file):
498 497 return b''
499 498 with open(file, b'rb') as fd:
500 499 return hexsha1(fd)
501 500
502 501
503 502 def getexecutable(filename):
504 503 mode = os.stat(filename).st_mode
505 504 return (
506 505 (mode & stat.S_IXUSR)
507 506 and (mode & stat.S_IXGRP)
508 507 and (mode & stat.S_IXOTH)
509 508 )
510 509
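# Note the conjunction above: a file only counts as executable when the
# user, group, and other execute bits are all set. Sketch (path made up):
#
#     os.chmod(b'f', 0o755); getexecutable(b'f')  # truthy
#     os.chmod(b'f', 0o744); getexecutable(b'f')  # falsy: group/other lack x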
511 510
512 511 def urljoin(first, second, *arg):
513 512 def join(left, right):
514 513 if not left.endswith(b'/'):
515 514 left += b'/'
516 515 if right.startswith(b'/'):
517 516 right = right[1:]
518 517 return left + right
519 518
520 519 url = join(first, second)
521 520 for a in arg:
522 521 url = join(url, a)
523 522 return url
524 523
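# Example (values made up) of the slash normalization performed by join():
#
#     urljoin(b'http://example.com/', b'/largefiles', b'deadbeef')
#     # -> b'http://example.com/largefiles/deadbeef'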
525 524
526 525 def hexsha1(fileobj):
527 526 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
528 527 object data"""
529 528 h = hashutil.sha1()
530 529 for chunk in util.filechunkiter(fileobj):
531 530 h.update(chunk)
532 531 return hex(h.digest())
533 532
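# Quick check (io.BytesIO stands in for a real file object): the digest of
# empty input is the well-known empty-string SHA-1, returned as hex bytes:
#
#     import io
#     hexsha1(io.BytesIO(b''))
#     # -> b'da39a3ee5e6b4b0d3255bfef95601890afd80709'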
534 533
535 534 def httpsendfile(ui, filename):
536 535 return httpconnection.httpsendfile(ui, filename, b'rb')
537 536
538 537
539 538 def unixpath(path):
540 539 '''Return a version of path normalized for use with the lfdirstate.'''
541 540 return util.pconvert(os.path.normpath(path))
542 541
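# Sketch (input made up): normpath collapses redundant segments and, on
# Windows, pconvert then flips the separators to '/':
#
#     unixpath(b'dir\\sub\\..\\big.bin')  # -> b'dir/big.bin' on Windows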
543 542
544 543 def islfilesrepo(repo):
545 544 '''Return true if the repo is a largefile repo.'''
546 545 if b'largefiles' in repo.requirements and any(
547 546 shortnameslash in f[1] for f in repo.store.datafiles()
548 547 ):
549 548 return True
550 549
551 550 return any(openlfdirstate(repo.ui, repo, False))
552 551
553 552
554 553 class storeprotonotcapable(Exception):
555 554 def __init__(self, storetypes):
556 555 self.storetypes = storetypes
557 556
558 557
559 558 def getstandinsstate(repo):
560 559 standins = []
561 560 matcher = getstandinmatcher(repo)
562 561 wctx = repo[None]
563 562 for standin in repo.dirstate.walk(
564 563 matcher, subrepos=[], unknown=False, ignored=False
565 564 ):
566 565 lfile = splitstandin(standin)
567 566 try:
568 567 hash = readasstandin(wctx[standin])
569 568 except IOError:
570 569 hash = None
571 570 standins.append((lfile, hash))
572 571 return standins
573 572
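# Together with getlfilestoupdate() below, this supports a snapshot/compare
# pattern (sketch; the middle step is whatever may rewrite standins):
#
#     oldstandins = getstandinsstate(repo)
#     # ... operation that may change standins ...
#     newstandins = getstandinsstate(repo)
#     filelist = getlfilestoupdate(oldstandins, newstandins)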
574 573
575 574 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
576 575 lfstandin = standin(lfile)
577 576 if lfstandin not in repo.dirstate:
578 577 lfdirstate.hacky_extension_update_file(
579 578 lfile,
580 579 p1_tracked=False,
581 580 wc_tracked=False,
582 581 )
583 582 else:
584 583 entry = repo.dirstate.get_entry(lfstandin)
585 584 lfdirstate.hacky_extension_update_file(
586 585 lfile,
587 586 wc_tracked=entry.tracked,
588 587 p1_tracked=entry.p1_tracked,
589 588 p2_info=entry.p2_info,
590 589 possibly_dirty=True,
591 590 )
592 591
593 592
594 593 def markcommitted(orig, ctx, node):
595 594 repo = ctx.repo()
596 595
597 596 lfdirstate = openlfdirstate(repo.ui, repo)
598 597 with lfdirstate.changing_parents(repo):
599 598 orig(node)
600 599
601 600 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
602 601 # because files coming from the 2nd parent are omitted in the latter.
603 602 #
604 603 # The former should be used to get targets of "synclfdirstate",
605 604 # because such files:
606 605 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
607 606 # - have to be marked as "n" after commit, but
608 607 # - aren't listed in "repo[node].files()"
609 608
610 609 for f in ctx.files():
611 610 lfile = splitstandin(f)
612 611 if lfile is not None:
613 612 synclfdirstate(repo, lfdirstate, lfile, False)
614 613 lfdirstate.write(repo.currenttransaction())
615 614
616 615 # As part of committing, copy all of the largefiles into the cache.
617 616 #
618 617 # Using "node" instead of "ctx" implies additional "repo[node]"
619 618 # lookup while copyalltostore(), but can omit redundant check for
620 619 # files coming from the 2nd parent, which should exist in store
621 620 # at merging.
622 621 copyalltostore(repo, node)
623 622
624 623
625 624 def getlfilestoupdate(oldstandins, newstandins):
626 625 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
627 626 filelist = []
628 627 for f in changedstandins:
629 628 if f[0] not in filelist:
630 629 filelist.append(f[0])
631 630 return filelist
632 631
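# Value-level sketch with fabricated (lfile, hash) pairs: only names whose
# standin state differs between the snapshots are returned, each once:
#
#     old = [(b'a.bin', b'11'), (b'b.bin', b'22')]
#     new = [(b'a.bin', b'11'), (b'b.bin', b'33')]
#     getlfilestoupdate(old, new)  # -> [b'b.bin']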
633 632
634 633 def getlfilestoupload(repo, missing, addfunc):
635 634 makeprogress = repo.ui.makeprogress
636 635 with makeprogress(
637 636 _(b'finding outgoing largefiles'),
638 637 unit=_(b'revisions'),
639 638 total=len(missing),
640 639 ) as progress:
641 640 for i, n in enumerate(missing):
642 641 progress.update(i)
643 642 parents = [p for p in repo[n].parents() if p != repo.nullid]
644 643
645 644 with lfstatus(repo, value=False):
646 645 ctx = repo[n]
647 646
648 647 files = set(ctx.files())
649 648 if len(parents) == 2:
650 649 mc = ctx.manifest()
651 650 mp1 = ctx.p1().manifest()
652 651 mp2 = ctx.p2().manifest()
653 652 for f in mp1:
654 653 if f not in mc:
655 654 files.add(f)
656 655 for f in mp2:
657 656 if f not in mc:
658 657 files.add(f)
659 658 for f in mc:
660 659 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
661 660 files.add(f)
662 661 for fn in files:
663 662 if isstandin(fn) and fn in ctx:
664 663 addfunc(fn, readasstandin(ctx[fn]))
665 664
666 665
667 666 def updatestandinsbymatch(repo, match):
668 667 """Update standins in the working directory according to specified match
669 668
670 669 This returns (possibly modified) ``match`` object to be used for
671 670 subsequent commit process.
672 671 """
673 672
674 673 ui = repo.ui
675 674
676 675 # Case 1: user calls commit with no specific files or
677 676 # include/exclude patterns: refresh and commit all files that
678 677 # are "dirty".
679 678 if match is None or match.always():
680 679 # Spend a bit of time here to get a list of files we know
681 680 # are modified so we can compare only against those.
682 681 # It can cost a lot of time (several seconds)
683 682 # otherwise to update all standins if the largefiles are
684 683 # large.
685 684 lfdirstate = openlfdirstate(ui, repo)
686 685 dirtymatch = matchmod.always()
687 686 unsure, s, mtime_boundary = lfdirstate.status(
688 687 dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
689 688 )
690 689 modifiedfiles = unsure + s.modified + s.added + s.removed
691 690 lfiles = listlfiles(repo)
692 691 # this only loops through largefiles that exist (not
693 692 # removed/renamed)
694 693 for lfile in lfiles:
695 694 if lfile in modifiedfiles:
696 695 fstandin = standin(lfile)
697 696 if repo.wvfs.exists(fstandin):
698 697 # this handles the case where a rebase is being
699 698 # performed and the working copy is not updated
700 699 # yet.
701 700 if repo.wvfs.exists(lfile):
702 701 updatestandin(repo, lfile, fstandin)
703 702
704 703 return match
705 704
706 705 lfiles = listlfiles(repo)
707 706 match._files = repo._subdirlfs(match.files(), lfiles)
708 707
709 708 # Case 2: user calls commit with specified patterns: refresh
710 709 # any matching big files.
711 710 smatcher = composestandinmatcher(repo, match)
712 711 standins = repo.dirstate.walk(
713 712 smatcher, subrepos=[], unknown=False, ignored=False
714 713 )
715 714
716 715 # No matching big files: get out of the way and pass control to
717 716 # the usual commit() method.
718 717 if not standins:
719 718 return match
720 719
721 720 # Refresh all matching big files. It's possible that the
722 721 # commit will end up failing, in which case the big files will
723 722 # stay refreshed. No harm done: the user modified them and
724 723 # asked to commit them, so sooner or later we're going to
725 724 # refresh the standins. Might as well leave them refreshed.
726 725 lfdirstate = openlfdirstate(ui, repo)
727 726 for fstandin in standins:
728 727 lfile = splitstandin(fstandin)
729 728 if lfdirstate.get_entry(lfile).tracked:
730 729 updatestandin(repo, lfile, fstandin)
731 730
732 731 # Cook up a new matcher that only matches regular files or
733 732 # standins corresponding to the big files requested by the
734 733 # user. Have to modify _files to prevent commit() from
735 734 # complaining "not tracked" for big files.
736 735 match = copy.copy(match)
737 736 origmatchfn = match.matchfn
738 737
739 738 # Check both the list of largefiles and the list of
740 739 # standins because if a largefile was removed, it
741 740 # won't be in the list of largefiles at this point
742 741 match._files += sorted(standins)
743 742
744 743 actualfiles = []
745 744 for f in match._files:
746 745 fstandin = standin(f)
747 746
748 747 # For largefiles, only one of the normal and standin should be
749 748 # committed (except if one of them is a remove). In the case of a
750 749 # standin removal, drop the normal file if it is unknown to dirstate.
751 750 # Thus, skip plain largefile names but keep the standin.
752 751 if f in lfiles or fstandin in standins:
753 752 if not repo.dirstate.get_entry(fstandin).removed:
754 753 if not repo.dirstate.get_entry(f).removed:
755 754 continue
756 755 elif not repo.dirstate.get_entry(f).any_tracked:
757 756 continue
758 757
759 758 actualfiles.append(f)
760 759 match._files = actualfiles
761 760
762 761 def matchfn(f):
763 762 if origmatchfn(f):
764 763 return f not in lfiles
765 764 else:
766 765 return f in standins
767 766
768 767 match.matchfn = matchfn
769 768
770 769 return match
771 770
772 771
773 772 class automatedcommithook:
774 773 """Stateful hook to update standins at the 1st commit of resuming
775 774
776 775 For efficiency, updating standins in the working directory should
777 776 be avoided while automated committing (like rebase, transplant and
778 777 so on), because they should be updated before committing.
779 778
780 779 But the 1st commit of resuming automated committing (e.g. ``rebase
781 780 --continue``) should update them, because largefiles may be
782 781 modified manually.
783 782 """
784 783
785 784 def __init__(self, resuming):
786 785 self.resuming = resuming
787 786
788 787 def __call__(self, repo, match):
789 788 if self.resuming:
790 789 self.resuming = False # avoids updating at subsequent commits
791 790 return updatestandinsbymatch(repo, match)
792 791 else:
793 792 return match
794 793
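# Sketch of the intended call pattern (resuming=True when e.g. ``rebase
# --continue`` picks the series back up):
#
#     hook = automatedcommithook(resuming=True)
#     match = hook(repo, match)  # first commit: standins are refreshed
#     match = hook(repo, match)  # later commits: match returned unchanged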
795 794
796 795 def getstatuswriter(ui, repo, forcibly=None):
797 796 """Return the function to write largefiles specific status out
798 797
799 798 If ``forcibly`` is ``None``, this returns the last element of
800 799 ``repo._lfstatuswriters`` as "default" writer function.
801 800
802 801 Otherwise, this returns the function to always write out (or
803 802 ignore if ``not forcibly``) status.
804 803 """
805 804 if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
806 805 return repo._lfstatuswriters[-1]
807 806 else:
808 807 if forcibly:
809 808 return ui.status # forcibly WRITE OUT
810 809 else:
811 810 return lambda *msg, **opts: None # forcibly IGNORE