largefile: use sysstr to check for attribute presence in `getstatuswriter`...
marmoute
r51782:3934d85c default
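On Python 3, attribute names passed to `getattr`/`hasattr` must be native `str`; a `bytes` name raises `TypeError` even when a default is supplied. That is why the check in `getstatuswriter` (at the end of the diff below) switches from `b'_largefilesenabled'` to the sysstr `'_largefilesenabled'`. A minimal illustration, using a hypothetical stand-in class rather than a real repo object:

    class FakeRepo:
        _largefilesenabled = True

    getattr(FakeRepo, '_largefilesenabled', None)    # -> True
    # getattr(FakeRepo, b'_largefilesenabled', None) # TypeError on Python 3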
@@ -1,823 +1,823 @@
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import contextlib
12 12 import copy
13 13 import os
14 14 import stat
15 15
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from mercurial.pycompat import open
19 19
20 20 from mercurial import (
21 21 dirstate,
22 22 encoding,
23 23 error,
24 24 httpconnection,
25 25 match as matchmod,
26 26 pycompat,
27 27 requirements,
28 28 scmutil,
29 29 sparse,
30 30 util,
31 31 vfs as vfsmod,
32 32 )
33 33 from mercurial.utils import hashutil
34 34 from mercurial.dirstateutils import timestamp
35 35
36 36 shortname = b'.hglf'
37 37 shortnameslash = shortname + b'/'
38 38 longname = b'largefiles'
39 39
40 40 # -- Private worker functions ------------------------------------------
41 41
42 42
43 43 @contextlib.contextmanager
44 44 def lfstatus(repo, value=True):
45 45 oldvalue = getattr(repo, 'lfstatus', False)
46 46 repo.lfstatus = value
47 47 try:
48 48 yield
49 49 finally:
50 50 repo.lfstatus = oldvalue
51 51
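# A hedged usage sketch (not from this file): `lfstatus` temporarily flips
# `repo.lfstatus` so that status computation includes largefiles, and it
# restores the previous value even if the body raises; `repo` is assumed to
# be any localrepo object:
#
#     with lfstatus(repo):
#         changes = repo.status()  # status calls here see largefiles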
52 52
53 53 def getminsize(ui, assumelfiles, opt, default=10):
54 54 lfsize = opt
55 55 if not lfsize and assumelfiles:
56 56 lfsize = ui.config(longname, b'minsize', default=default)
57 57 if lfsize:
58 58 try:
59 59 lfsize = float(lfsize)
60 60 except ValueError:
61 61 raise error.Abort(
62 62 _(b'largefiles: size must be number (not %s)\n') % lfsize
63 63 )
64 64 if lfsize is None:
65 65 raise error.Abort(_(b'minimum size for largefiles must be specified'))
66 66 return lfsize
67 67
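# Sketch of the size resolution above (values assumed, not from this file):
# an explicit option wins, otherwise `largefiles.minsize` from the config is
# used, and the result is always a float (in megabytes):
#
#     getminsize(ui, assumelfiles=True, opt=b'')     # falls back to config
#     getminsize(ui, assumelfiles=False, opt=b'42')  # -> 42.0
#     getminsize(ui, assumelfiles=False, opt=None)   # raises error.Abort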
68 68
69 69 def link(src, dest):
70 70 """Try to create hardlink - if that fails, efficiently make a copy."""
71 71 util.makedirs(os.path.dirname(dest))
72 72 try:
73 73 util.oslink(src, dest)
74 74 except OSError:
75 75 # if hardlinks fail, fallback on atomic copy
76 76 with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
77 77 for chunk in util.filechunkiter(srcf):
78 78 dstf.write(chunk)
79 79 os.chmod(dest, os.stat(src).st_mode)
80 80
81 81
82 82 def usercachepath(ui, hash):
83 83 """Return the correct location in the "global" largefiles cache for a file
84 84 with the given hash.
85 85 This cache is used for sharing of largefiles across repositories - both
86 86 to preserve download bandwidth and storage space."""
87 87 return os.path.join(_usercachedir(ui), hash)
88 88
89 89
90 90 def _usercachedir(ui, name=longname):
91 91 '''Return the location of the "global" largefiles cache.'''
92 92 path = ui.configpath(name, b'usercache')
93 93 if path:
94 94 return path
95 95
96 96 hint = None
97 97
98 98 if pycompat.iswindows:
99 99 appdata = encoding.environ.get(
100 100 b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
101 101 )
102 102 if appdata:
103 103 return os.path.join(appdata, name)
104 104
105 105 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
106 106 b"LOCALAPPDATA",
107 107 b"APPDATA",
108 108 name,
109 109 )
110 110 elif pycompat.isdarwin:
111 111 home = encoding.environ.get(b'HOME')
112 112 if home:
113 113 return os.path.join(home, b'Library', b'Caches', name)
114 114
115 115 hint = _(b"define %s in the environment, or set %s.usercache") % (
116 116 b"HOME",
117 117 name,
118 118 )
119 119 elif pycompat.isposix:
120 120 path = encoding.environ.get(b'XDG_CACHE_HOME')
121 121 if path:
122 122 return os.path.join(path, name)
123 123 home = encoding.environ.get(b'HOME')
124 124 if home:
125 125 return os.path.join(home, b'.cache', name)
126 126
127 127 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
128 128 b"XDG_CACHE_HOME",
129 129 b"HOME",
130 130 name,
131 131 )
132 132 else:
133 133 raise error.Abort(
134 134 _(b'unknown operating system: %s\n') % pycompat.osname
135 135 )
136 136
137 137 raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
138 138
139 139
140 140 def inusercache(ui, hash):
141 141 path = usercachepath(ui, hash)
142 142 return os.path.exists(path)
143 143
144 144
145 145 def findfile(repo, hash):
146 146 """Return store path of the largefile with the specified hash.
147 147 As a side effect, the file might be linked from user cache.
148 148 Return None if the file can't be found locally."""
149 149 path, exists = findstorepath(repo, hash)
150 150 if exists:
151 151 repo.ui.note(_(b'found %s in store\n') % hash)
152 152 return path
153 153 elif inusercache(repo.ui, hash):
154 154 repo.ui.note(_(b'found %s in system cache\n') % hash)
155 155 path = storepath(repo, hash)
156 156 link(usercachepath(repo.ui, hash), path)
157 157 return path
158 158 return None
159 159
160 160
161 161 class largefilesdirstate(dirstate.dirstate):
162 162 _large_file_dirstate = True
163 163 _tr_key_suffix = b'-large-files'
164 164
165 165 def __getitem__(self, key):
166 166 return super(largefilesdirstate, self).__getitem__(unixpath(key))
167 167
168 168 def set_tracked(self, f):
169 169 return super(largefilesdirstate, self).set_tracked(unixpath(f))
170 170
171 171 def set_untracked(self, f):
172 172 return super(largefilesdirstate, self).set_untracked(unixpath(f))
173 173
174 174 def normal(self, f, parentfiledata=None):
175 175 # not sure if we should pass the `parentfiledata` down or throw it
176 176 # away. So throwing it away to stay on the safe side.
177 177 return super(largefilesdirstate, self).normal(unixpath(f))
178 178
179 179 def remove(self, f):
180 180 return super(largefilesdirstate, self).remove(unixpath(f))
181 181
182 182 def add(self, f):
183 183 return super(largefilesdirstate, self).add(unixpath(f))
184 184
185 185 def drop(self, f):
186 186 return super(largefilesdirstate, self).drop(unixpath(f))
187 187
188 188 def forget(self, f):
189 189 return super(largefilesdirstate, self).forget(unixpath(f))
190 190
191 191 def normallookup(self, f):
192 192 return super(largefilesdirstate, self).normallookup(unixpath(f))
193 193
194 194 def _ignore(self, f):
195 195 return False
196 196
197 197 def write(self, tr):
198 198 # (1) disable PENDING mode always
199 199 # (lfdirstate isn't yet managed as a part of the transaction)
200 200 # (2) avoid develwarn 'use dirstate.write with ....'
201 201 if tr:
202 202 tr.addbackup(b'largefiles/dirstate', location=b'plain')
203 203 super(largefilesdirstate, self).write(None)
204 204
205 205
206 206 def openlfdirstate(ui, repo, create=True):
207 207 """
208 208 Return a dirstate object that tracks largefiles: i.e. its root is
209 209 the repo root, but it is saved in .hg/largefiles/dirstate.
210 210
211 211 If a dirstate object already exists and is being used for a 'changing_*'
212 212 context, it will be returned.
213 213 """
214 214 sub_dirstate = getattr(repo.dirstate, '_sub_dirstate', None)
215 215 if sub_dirstate is not None:
216 216 return sub_dirstate
217 217 vfs = repo.vfs
218 218 lfstoredir = longname
219 219 opener = vfsmod.vfs(vfs.join(lfstoredir))
220 220 use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
221 221 lfdirstate = largefilesdirstate(
222 222 opener,
223 223 ui,
224 224 repo.root,
225 225 repo.dirstate._validate,
226 226 lambda: sparse.matcher(repo),
227 227 repo.nodeconstants,
228 228 use_dirstate_v2,
229 229 )
230 230
231 231 # If the largefiles dirstate does not exist, populate and create
232 232 # it. This ensures that we create it on the first meaningful
233 233 # largefiles operation in a new clone.
234 234 if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
235 235 try:
236 236 with repo.wlock(wait=False), lfdirstate.changing_files(repo):
237 237 matcher = getstandinmatcher(repo)
238 238 standins = repo.dirstate.walk(
239 239 matcher, subrepos=[], unknown=False, ignored=False
240 240 )
241 241
242 242 if len(standins) > 0:
243 243 vfs.makedirs(lfstoredir)
244 244
245 245 for standin in standins:
246 246 lfile = splitstandin(standin)
247 247 lfdirstate.hacky_extension_update_file(
248 248 lfile,
249 249 p1_tracked=True,
250 250 wc_tracked=True,
251 251 possibly_dirty=True,
252 252 )
253 253 except error.LockError:
254 254 # Assume that whatever was holding the lock was important.
255 255 # If we were doing something important, we would already have
256 256 # either the lock or a largefile dirstate.
257 257 pass
258 258 return lfdirstate
259 259
260 260
261 261 def lfdirstatestatus(lfdirstate, repo):
262 262 pctx = repo[b'.']
263 263 match = matchmod.always()
264 264 unsure, s, mtime_boundary = lfdirstate.status(
265 265 match, subrepos=[], ignored=False, clean=False, unknown=False
266 266 )
267 267 modified, clean = s.modified, s.clean
268 268 wctx = repo[None]
269 269 for lfile in unsure:
270 270 try:
271 271 fctx = pctx[standin(lfile)]
272 272 except LookupError:
273 273 fctx = None
274 274 if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
275 275 modified.append(lfile)
276 276 else:
277 277 clean.append(lfile)
278 278 st = wctx[lfile].lstat()
279 279 mode = st.st_mode
280 280 size = st.st_size
281 281 mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
282 282 if mtime is not None:
283 283 cache_data = (mode, size, mtime)
284 284 lfdirstate.set_clean(lfile, cache_data)
285 285 return s
286 286
287 287
288 288 def listlfiles(repo, rev=None, matcher=None):
289 289 """return a list of largefiles in the working copy or the
290 290 specified changeset"""
291 291
292 292 if matcher is None:
293 293 matcher = getstandinmatcher(repo)
294 294
295 295 # ignore unknown files in working directory
296 296 return [
297 297 splitstandin(f)
298 298 for f in repo[rev].walk(matcher)
299 299 if rev is not None or repo.dirstate.get_entry(f).any_tracked
300 300 ]
301 301
302 302
303 303 def instore(repo, hash, forcelocal=False):
304 304 '''Return true if a largefile with the given hash exists in the store'''
305 305 return os.path.exists(storepath(repo, hash, forcelocal))
306 306
307 307
308 308 def storepath(repo, hash, forcelocal=False):
309 309 """Return the correct location in the repository largefiles store for a
310 310 file with the given hash."""
311 311 if not forcelocal and repo.shared():
312 312 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
313 313 return repo.vfs.join(longname, hash)
314 314
315 315
316 316 def findstorepath(repo, hash):
317 317 """Search through the local store path(s) to find the file for the given
318 318 hash. If the file is not found, its path in the primary store is returned.
319 319 The return value is a tuple of (path, exists(path)).
320 320 """
321 321 # For shared repos, the primary store is in the share source. But for
322 322 # backward compatibility, force a lookup in the local store if it wasn't
323 323 # found in the share source.
324 324 path = storepath(repo, hash, False)
325 325
326 326 if instore(repo, hash):
327 327 return (path, True)
328 328 elif repo.shared() and instore(repo, hash, True):
329 329 return storepath(repo, hash, True), True
330 330
331 331 return (path, False)
332 332
333 333
334 334 def copyfromcache(repo, hash, filename):
335 335 """Copy the specified largefile from the repo or system cache to
336 336 filename in the repository. Return true on success or false if the
337 337 file was not found in either cache (which should not happen:
338 338 this is meant to be called only after ensuring that the needed
339 339 largefile exists in the cache)."""
340 340 wvfs = repo.wvfs
341 341 path = findfile(repo, hash)
342 342 if path is None:
343 343 return False
344 344 wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
345 345 # The write may fail before the file is fully written, but we
346 346 # don't use atomic writes in the working copy.
347 347 with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
348 348 gothash = copyandhash(util.filechunkiter(srcfd), destfd)
349 349 if gothash != hash:
350 350 repo.ui.warn(
351 351 _(b'%s: data corruption in %s with hash %s\n')
352 352 % (filename, path, gothash)
353 353 )
354 354 wvfs.unlink(filename)
355 355 return False
356 356 return True
357 357
358 358
359 359 def copytostore(repo, ctx, file, fstandin):
360 360 wvfs = repo.wvfs
361 361 hash = readasstandin(ctx[fstandin])
362 362 if instore(repo, hash):
363 363 return
364 364 if wvfs.exists(file):
365 365 copytostoreabsolute(repo, wvfs.join(file), hash)
366 366 else:
367 367 repo.ui.warn(
368 368 _(b"%s: largefile %s not available from local store\n")
369 369 % (file, hash)
370 370 )
371 371
372 372
373 373 def copyalltostore(repo, node):
374 374 '''Copy all largefiles in a given revision to the store'''
375 375
376 376 ctx = repo[node]
377 377 for filename in ctx.files():
378 378 realfile = splitstandin(filename)
379 379 if realfile is not None and filename in ctx.manifest():
380 380 copytostore(repo, ctx, realfile, filename)
381 381
382 382
383 383 def copytostoreabsolute(repo, file, hash):
384 384 if inusercache(repo.ui, hash):
385 385 link(usercachepath(repo.ui, hash), storepath(repo, hash))
386 386 else:
387 387 util.makedirs(os.path.dirname(storepath(repo, hash)))
388 388 with open(file, b'rb') as srcf:
389 389 with util.atomictempfile(
390 390 storepath(repo, hash), createmode=repo.store.createmode
391 391 ) as dstf:
392 392 for chunk in util.filechunkiter(srcf):
393 393 dstf.write(chunk)
394 394 linktousercache(repo, hash)
395 395
396 396
397 397 def linktousercache(repo, hash):
398 398 """Link / copy the largefile with the specified hash from the store
399 399 to the cache."""
400 400 path = usercachepath(repo.ui, hash)
401 401 link(storepath(repo, hash), path)
402 402
403 403
404 404 def getstandinmatcher(repo, rmatcher=None):
405 405 '''Return a match object that applies rmatcher to the standin directory'''
406 406 wvfs = repo.wvfs
407 407 standindir = shortname
408 408
409 409 # no warnings about missing files or directories
410 410 badfn = lambda f, msg: None
411 411
412 412 if rmatcher and not rmatcher.always():
413 413 pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
414 414 if not pats:
415 415 pats = [wvfs.join(standindir)]
416 416 match = scmutil.match(repo[None], pats, badfn=badfn)
417 417 else:
418 418 # no patterns: relative to repo root
419 419 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
420 420 return match
421 421
422 422
423 423 def composestandinmatcher(repo, rmatcher):
424 424 """Return a matcher that accepts standins corresponding to the
425 425 files accepted by rmatcher. Pass the list of files in the matcher
426 426 as the paths specified by the user."""
427 427 smatcher = getstandinmatcher(repo, rmatcher)
428 428 isstandin = smatcher.matchfn
429 429
430 430 def composedmatchfn(f):
431 431 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
432 432
433 433 smatcher.matchfn = composedmatchfn
434 434
435 435 return smatcher
436 436
437 437
438 438 def standin(filename):
439 439 """Return the repo-relative path to the standin for the specified big
440 440 file."""
441 441 # Notes:
442 442 # 1) Some callers want an absolute path, but for instance addlargefiles
443 443 # needs it repo-relative so it can be passed to repo[None].add(). So
444 444 # leave it up to the caller to use repo.wjoin() to get an absolute path.
445 445 # 2) Join with '/' because that's what dirstate always uses, even on
446 446 # Windows. Change existing separator to '/' first in case we are
447 447 # passed filenames from an external source (like the command line).
448 448 return shortnameslash + util.pconvert(filename)
449 449
450 450
451 451 def isstandin(filename):
452 452 """Return true if filename is a big file standin. filename must be
453 453 in Mercurial's internal form (slash-separated)."""
454 454 return filename.startswith(shortnameslash)
455 455
456 456
457 457 def splitstandin(filename):
458 458 # Split on / because that's what dirstate always uses, even on Windows.
459 459 # Change local separator to / first just in case we are passed filenames
460 460 # from an external source (like the command line).
461 461 bits = util.pconvert(filename).split(b'/', 1)
462 462 if len(bits) == 2 and bits[0] == shortname:
463 463 return bits[1]
464 464 else:
465 465 return None
466 466
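# Round-trip sketch for the three helpers above (doctest style, assumed):
#
#     >>> standin(b'sub/big.bin')
#     b'.hglf/sub/big.bin'
#     >>> isstandin(b'.hglf/sub/big.bin')
#     True
#     >>> splitstandin(b'.hglf/sub/big.bin')
#     b'sub/big.bin'
#     >>> splitstandin(b'sub/big.bin') is None
#     True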
467 467
468 468 def updatestandin(repo, lfile, standin):
469 469 """Re-calculate hash value of lfile and write it into standin
470 470
471 471 This assumes that "lfutil.standin(lfile) == standin", for efficiency.
472 472 """
473 473 file = repo.wjoin(lfile)
474 474 if repo.wvfs.exists(lfile):
475 475 hash = hashfile(file)
476 476 executable = getexecutable(file)
477 477 writestandin(repo, standin, hash, executable)
478 478 else:
479 479 raise error.Abort(_(b'%s: file not found!') % lfile)
480 480
481 481
482 482 def readasstandin(fctx):
483 483 """read hex hash from given filectx of standin file
484 484
485 485 This encapsulates how "standin" data is stored into storage layer."""
486 486 return fctx.data().strip()
487 487
488 488
489 489 def writestandin(repo, standin, hash, executable):
490 490 '''write hash to <repo.root>/<standin>'''
491 491 repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')
492 492
493 493
494 494 def copyandhash(instream, outfile):
495 495 """Read bytes from instream (iterable) and write them to outfile,
496 496 computing the SHA-1 hash of the data along the way. Return the hash."""
497 497 hasher = hashutil.sha1(b'')
498 498 for data in instream:
499 499 hasher.update(data)
500 500 outfile.write(data)
501 501 return hex(hasher.digest())
502 502
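# A minimal sketch of `copyandhash` on an in-memory stream (assumed, not
# part of this file); note that mercurial.node.hex returns bytes:
#
#     >>> import io
#     >>> out = io.BytesIO()
#     >>> copyandhash(iter([b'foo']), out)
#     b'0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
#     >>> out.getvalue()
#     b'foo'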
503 503
504 504 def hashfile(file):
505 505 if not os.path.exists(file):
506 506 return b''
507 507 with open(file, b'rb') as fd:
508 508 return hexsha1(fd)
509 509
510 510
511 511 def getexecutable(filename):
512 512 mode = os.stat(filename).st_mode
513 513 return (
514 514 (mode & stat.S_IXUSR)
515 515 and (mode & stat.S_IXGRP)
516 516 and (mode & stat.S_IXOTH)
517 517 )
518 518
519 519
520 520 def urljoin(first, second, *arg):
521 521 def join(left, right):
522 522 if not left.endswith(b'/'):
523 523 left += b'/'
524 524 if right.startswith(b'/'):
525 525 right = right[1:]
526 526 return left + right
527 527
528 528 url = join(first, second)
529 529 for a in arg:
530 530 url = join(url, a)
531 531 return url
532 532
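# Behavior sketch for `urljoin` (doctest style, assumed): exactly one slash
# is kept at each boundary, regardless of the inputs:
#
#     >>> urljoin(b'http://example.com/', b'/largefiles', b'deadbeef')
#     b'http://example.com/largefiles/deadbeef'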
533 533
534 534 def hexsha1(fileobj):
535 535 """hexsha1 returns the hex-encoded sha1 sum of the data in the given
536 536 file-like object"""
537 537 h = hashutil.sha1()
538 538 for chunk in util.filechunkiter(fileobj):
539 539 h.update(chunk)
540 540 return hex(h.digest())
541 541
542 542
543 543 def httpsendfile(ui, filename):
544 544 return httpconnection.httpsendfile(ui, filename, b'rb')
545 545
546 546
547 547 def unixpath(path):
548 548 '''Return a version of path normalized for use with the lfdirstate.'''
549 549 return util.pconvert(os.path.normpath(path))
550 550
551 551
552 552 def islfilesrepo(repo):
553 553 '''Return true if the repo is a largefile repo.'''
554 554 if b'largefiles' in repo.requirements:
555 555 for entry in repo.store.data_entries():
556 556 if entry.is_revlog and shortnameslash in entry.target_id:
557 557 return True
558 558
559 559 return any(openlfdirstate(repo.ui, repo, False))
560 560
561 561
562 562 class storeprotonotcapable(Exception):
563 563 def __init__(self, storetypes):
564 564 self.storetypes = storetypes
565 565
566 566
567 567 def getstandinsstate(repo):
568 568 standins = []
569 569 matcher = getstandinmatcher(repo)
570 570 wctx = repo[None]
571 571 for standin in repo.dirstate.walk(
572 572 matcher, subrepos=[], unknown=False, ignored=False
573 573 ):
574 574 lfile = splitstandin(standin)
575 575 try:
576 576 hash = readasstandin(wctx[standin])
577 577 except IOError:
578 578 hash = None
579 579 standins.append((lfile, hash))
580 580 return standins
581 581
582 582
583 583 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
584 584 lfstandin = standin(lfile)
585 585 if lfstandin not in repo.dirstate:
586 586 lfdirstate.hacky_extension_update_file(
587 587 lfile,
588 588 p1_tracked=False,
589 589 wc_tracked=False,
590 590 )
591 591 else:
592 592 entry = repo.dirstate.get_entry(lfstandin)
593 593 lfdirstate.hacky_extension_update_file(
594 594 lfile,
595 595 wc_tracked=entry.tracked,
596 596 p1_tracked=entry.p1_tracked,
597 597 p2_info=entry.p2_info,
598 598 possibly_dirty=True,
599 599 )
600 600
601 601
602 602 def markcommitted(orig, ctx, node):
603 603 repo = ctx.repo()
604 604
605 605 with repo.dirstate.changing_parents(repo):
606 606 orig(node)
607 607
608 608 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
609 609 # because files coming from the 2nd parent are omitted in the latter.
610 610 #
611 611 # The former should be used to get targets of "synclfdirstate",
612 612 # because such files:
613 613 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
614 614 # - have to be marked as "n" after commit, but
615 615 # - aren't listed in "repo[node].files()"
616 616
617 617 lfdirstate = openlfdirstate(repo.ui, repo)
618 618 for f in ctx.files():
619 619 lfile = splitstandin(f)
620 620 if lfile is not None:
621 621 synclfdirstate(repo, lfdirstate, lfile, False)
622 622
623 623 # As part of committing, copy all of the largefiles into the cache.
624 624 #
625 625 # Using "node" instead of "ctx" implies additional "repo[node]"
626 626 # lookup during copyalltostore(), but it can omit a redundant
627 627 # check for files coming from the 2nd parent, which should already
628 628 # exist in the store after a merge.
629 629 copyalltostore(repo, node)
630 630
631 631
632 632 def getlfilestoupdate(oldstandins, newstandins):
633 633 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
634 634 filelist = []
635 635 for f in changedstandins:
636 636 if f[0] not in filelist:
637 637 filelist.append(f[0])
638 638 return filelist
639 639
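# Sketch (assumed): standins are (lfile, hash) pairs, so any entry whose
# hash changed between the two snapshots shows up in the symmetric
# difference, and its filename is reported once:
#
#     >>> old = [(b'a.bin', b'hash1'), (b'b.bin', b'hash2')]
#     >>> new = [(b'a.bin', b'hash1'), (b'b.bin', b'hash3')]
#     >>> getlfilestoupdate(old, new)
#     [b'b.bin']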
640 640
641 641 def getlfilestoupload(repo, missing, addfunc):
642 642 makeprogress = repo.ui.makeprogress
643 643 with makeprogress(
644 644 _(b'finding outgoing largefiles'),
645 645 unit=_(b'revisions'),
646 646 total=len(missing),
647 647 ) as progress:
648 648 for i, n in enumerate(missing):
649 649 progress.update(i)
650 650 parents = [p for p in repo[n].parents() if p != repo.nullid]
651 651
652 652 with lfstatus(repo, value=False):
653 653 ctx = repo[n]
654 654
655 655 files = set(ctx.files())
656 656 if len(parents) == 2:
657 657 mc = ctx.manifest()
658 658 mp1 = ctx.p1().manifest()
659 659 mp2 = ctx.p2().manifest()
660 660 for f in mp1:
661 661 if f not in mc:
662 662 files.add(f)
663 663 for f in mp2:
664 664 if f not in mc:
665 665 files.add(f)
666 666 for f in mc:
667 667 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
668 668 files.add(f)
669 669 for fn in files:
670 670 if isstandin(fn) and fn in ctx:
671 671 addfunc(fn, readasstandin(ctx[fn]))
672 672
673 673
674 674 def updatestandinsbymatch(repo, match):
675 675 """Update standins in the working directory according to the specified match
676 676
677 677 This returns a (possibly modified) ``match`` object to be used for
678 678 the subsequent commit process.
679 679 """
680 680
681 681 ui = repo.ui
682 682
683 683 # Case 1: user calls commit with no specific files or
684 684 # include/exclude patterns: refresh and commit all files that
685 685 # are "dirty".
686 686 if match is None or match.always():
687 687 # Spend a bit of time here to get a list of files we know
688 688 # are modified so we can compare only against those.
689 689 # Otherwise, updating all standins can cost a lot
690 690 # of time (several seconds) if the largefiles
691 691 # are large.
692 692 dirtymatch = matchmod.always()
693 693 with repo.dirstate.running_status(repo):
694 694 lfdirstate = openlfdirstate(ui, repo)
695 695 unsure, s, mtime_boundary = lfdirstate.status(
696 696 dirtymatch,
697 697 subrepos=[],
698 698 ignored=False,
699 699 clean=False,
700 700 unknown=False,
701 701 )
702 702 modifiedfiles = unsure + s.modified + s.added + s.removed
703 703 lfiles = listlfiles(repo)
704 704 # this only loops through largefiles that exist (not
705 705 # removed/renamed)
706 706 for lfile in lfiles:
707 707 if lfile in modifiedfiles:
708 708 fstandin = standin(lfile)
709 709 if repo.wvfs.exists(fstandin):
710 710 # this handles the case where a rebase is being
711 711 # performed and the working copy is not updated
712 712 # yet.
713 713 if repo.wvfs.exists(lfile):
714 714 updatestandin(repo, lfile, fstandin)
715 715
716 716 return match
717 717
718 718 lfiles = listlfiles(repo)
719 719 match._files = repo._subdirlfs(match.files(), lfiles)
720 720
721 721 # Case 2: user calls commit with specified patterns: refresh
722 722 # any matching big files.
723 723 smatcher = composestandinmatcher(repo, match)
724 724 standins = repo.dirstate.walk(
725 725 smatcher, subrepos=[], unknown=False, ignored=False
726 726 )
727 727
728 728 # No matching big files: get out of the way and pass control to
729 729 # the usual commit() method.
730 730 if not standins:
731 731 return match
732 732
733 733 # Refresh all matching big files. It's possible that the
734 734 # commit will end up failing, in which case the big files will
735 735 # stay refreshed. No harm done: the user modified them and
736 736 # asked to commit them, so sooner or later we're going to
737 737 # refresh the standins. Might as well leave them refreshed.
738 738 lfdirstate = openlfdirstate(ui, repo)
739 739 for fstandin in standins:
740 740 lfile = splitstandin(fstandin)
741 741 if lfdirstate.get_entry(lfile).tracked:
742 742 updatestandin(repo, lfile, fstandin)
743 743
744 744 # Cook up a new matcher that only matches regular files or
745 745 # standins corresponding to the big files requested by the
746 746 # user. Have to modify _files to prevent commit() from
747 747 # complaining "not tracked" for big files.
748 748 match = copy.copy(match)
749 749 origmatchfn = match.matchfn
750 750
751 751 # Check both the list of largefiles and the list of
752 752 # standins because if a largefile was removed, it
753 753 # won't be in the list of largefiles at this point
754 754 match._files += sorted(standins)
755 755
756 756 actualfiles = []
757 757 for f in match._files:
758 758 fstandin = standin(f)
759 759
760 760 # For largefiles, only one of the normal and standin should be
761 761 # committed (except if one of them is a remove). In the case of a
762 762 # standin removal, drop the normal file if it is unknown to dirstate.
763 763 # Thus, skip plain largefile names but keep the standin.
764 764 if f in lfiles or fstandin in standins:
765 765 if not repo.dirstate.get_entry(fstandin).removed:
766 766 if not repo.dirstate.get_entry(f).removed:
767 767 continue
768 768 elif not repo.dirstate.get_entry(f).any_tracked:
769 769 continue
770 770
771 771 actualfiles.append(f)
772 772 match._files = actualfiles
773 773
774 774 def matchfn(f):
775 775 if origmatchfn(f):
776 776 return f not in lfiles
777 777 else:
778 778 return f in standins
779 779
780 780 match.matchfn = matchfn
781 781
782 782 return match
783 783
784 784
785 785 class automatedcommithook:
786 786 """Stateful hook to update standins on the first commit after resuming
787 787
788 788 For efficiency, updating standins in the working directory should
789 789 be avoided during automated committing (like rebase, transplant and
790 790 so on), because they should have been updated before committing.
791 791
792 792 But the first commit after resuming automated committing (e.g. ``rebase
793 793 --continue``) should update them, because largefiles may have been
794 794 modified manually.
795 795 """
796 796
797 797 def __init__(self, resuming):
798 798 self.resuming = resuming
799 799
800 800 def __call__(self, repo, match):
801 801 if self.resuming:
802 802 self.resuming = False # avoids updating at subsequent commits
803 803 return updatestandinsbymatch(repo, match)
804 804 else:
805 805 return match
806 806
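# Wiring sketch (assumed, not from this file): an automated tool passes
# `resuming=True` when continuing an interrupted run, so only its first
# commit refreshes standins and later commits skip the work:
#
#     hook = automatedcommithook(resuming=True)
#     match = hook(repo, match)  # 1st call: updatestandinsbymatch(...)
#     match = hook(repo, match)  # later calls: returns match unchanged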
807 807
808 808 def getstatuswriter(ui, repo, forcibly=None):
809 809 """Return the function to write largefiles-specific status out
810 810
811 811 If ``forcibly`` is ``None``, this returns the last element of
812 812 ``repo._lfstatuswriters`` as "default" writer function.
813 813
814 814 Otherwise, this returns a function that always writes out the
815 815 status (or ignores it if ``not forcibly``).
816 816 """
817 if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
817 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
818 818 return repo._lfstatuswriters[-1]
819 819 else:
820 820 if forcibly:
821 821 return ui.status # forcibly WRITE OUT
822 822 else:
823 823 return lambda *msg, **opts: None # forcibly IGNORE
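# Usage sketch (assumed): in a largefiles-enabled repo, callers fetch the
# writer once and call it like `ui.status`. Note the changed line above:
# `safehasattr` looks the attribute up with getattr(), and on Python 3
# attribute names must be native str, hence '_largefilesenabled' as a
# sysstr instead of bytes:
#
#     writer = getstatuswriter(ui, repo)                 # default writer
#     writer(_(b'getting changed largefiles\n'))
#     quiet = getstatuswriter(ui, repo, forcibly=False)  # swallows output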