largefiles: use `hacky_extension_update_file` in `openlfdirstate`...
marmoute
r50910:e86262dd default
@@ -1,807 +1,807 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import contextlib
12 12 import copy
13 13 import os
14 14 import stat
15 15
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from mercurial.pycompat import open
19 19
20 20 from mercurial import (
21 21 dirstate,
22 22 encoding,
23 23 error,
24 24 httpconnection,
25 25 match as matchmod,
26 26 pycompat,
27 27 requirements,
28 28 scmutil,
29 29 sparse,
30 30 util,
31 31 vfs as vfsmod,
32 32 )
33 33 from mercurial.utils import hashutil
34 34 from mercurial.dirstateutils import timestamp
35 35
36 36 shortname = b'.hglf'
37 37 shortnameslash = shortname + b'/'
38 38 longname = b'largefiles'
39 39
40 40 # -- Private worker functions ------------------------------------------
41 41
42 42
43 43 @contextlib.contextmanager
44 44 def lfstatus(repo, value=True):
45 45 oldvalue = getattr(repo, 'lfstatus', False)
46 46 repo.lfstatus = value
47 47 try:
48 48 yield
49 49 finally:
50 50 repo.lfstatus = oldvalue
51 51
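# A minimal usage sketch of the context manager above (illustrative only;
# `repo` stands for any already-opened repository object):
#
#     with lfstatus(repo):
#         # within the block repo.lfstatus is True, so the extension's
#         # status machinery reports largefiles rather than standins
#         st = repo.status()
#     # on exit the previous value of repo.lfstatus is restored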
52 52
53 53 def getminsize(ui, assumelfiles, opt, default=10):
54 54 lfsize = opt
55 55 if not lfsize and assumelfiles:
56 56 lfsize = ui.config(longname, b'minsize', default=default)
57 57 if lfsize:
58 58 try:
59 59 lfsize = float(lfsize)
60 60 except ValueError:
61 61 raise error.Abort(
62 62 _(b'largefiles: size must be a number (not %s)\n') % lfsize
63 63 )
64 64 if lfsize is None:
65 65 raise error.Abort(_(b'minimum size for largefiles must be specified'))
66 66 return lfsize
67 67
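# Illustrative example: with the (hypothetical) configuration below and no
# explicit size option, getminsize(ui, True, None) returns 2.0, i.e. a
# 2 MB threshold above which files are treated as largefiles:
#
#     [largefiles]
#     minsize = 2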
68 68
69 69 def link(src, dest):
70 70 """Try to create hardlink - if that fails, efficiently make a copy."""
71 71 util.makedirs(os.path.dirname(dest))
72 72 try:
73 73 util.oslink(src, dest)
74 74 except OSError:
75 75 # if hardlinking fails, fall back on an atomic copy
76 76 with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
77 77 for chunk in util.filechunkiter(srcf):
78 78 dstf.write(chunk)
79 79 os.chmod(dest, os.stat(src).st_mode)
80 80
81 81
82 82 def usercachepath(ui, hash):
83 83 """Return the correct location in the "global" largefiles cache for a file
84 84 with the given hash.
85 85 This cache is used to share largefiles across repositories, saving
86 86 both download bandwidth and storage space."""
87 87 return os.path.join(_usercachedir(ui), hash)
88 88
89 89
90 90 def _usercachedir(ui, name=longname):
91 91 '''Return the location of the "global" largefiles cache.'''
92 92 path = ui.configpath(name, b'usercache')
93 93 if path:
94 94 return path
95 95
96 96 hint = None
97 97
98 98 if pycompat.iswindows:
99 99 appdata = encoding.environ.get(
100 100 b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
101 101 )
102 102 if appdata:
103 103 return os.path.join(appdata, name)
104 104
105 105 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
106 106 b"LOCALAPPDATA",
107 107 b"APPDATA",
108 108 name,
109 109 )
110 110 elif pycompat.isdarwin:
111 111 home = encoding.environ.get(b'HOME')
112 112 if home:
113 113 return os.path.join(home, b'Library', b'Caches', name)
114 114
115 115 hint = _(b"define %s in the environment, or set %s.usercache") % (
116 116 b"HOME",
117 117 name,
118 118 )
119 119 elif pycompat.isposix:
120 120 path = encoding.environ.get(b'XDG_CACHE_HOME')
121 121 if path:
122 122 return os.path.join(path, name)
123 123 home = encoding.environ.get(b'HOME')
124 124 if home:
125 125 return os.path.join(home, b'.cache', name)
126 126
127 127 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
128 128 b"XDG_CACHE_HOME",
129 129 b"HOME",
130 130 name,
131 131 )
132 132 else:
133 133 raise error.Abort(
134 134 _(b'unknown operating system: %s\n') % pycompat.osname
135 135 )
136 136
137 137 raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
138 138
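# The platform-specific lookup above can be short-circuited from the
# configuration; for example (path is hypothetical):
#
#     [largefiles]
#     usercache = /srv/cache/hg-largefiles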
139 139
140 140 def inusercache(ui, hash):
141 141 path = usercachepath(ui, hash)
142 142 return os.path.exists(path)
143 143
144 144
145 145 def findfile(repo, hash):
146 146 """Return store path of the largefile with the specified hash.
147 147 As a side effect, the file might be linked from user cache.
148 148 Return None if the file can't be found locally."""
149 149 path, exists = findstorepath(repo, hash)
150 150 if exists:
151 151 repo.ui.note(_(b'found %s in store\n') % hash)
152 152 return path
153 153 elif inusercache(repo.ui, hash):
154 154 repo.ui.note(_(b'found %s in system cache\n') % hash)
155 155 path = storepath(repo, hash)
156 156 link(usercachepath(repo.ui, hash), path)
157 157 return path
158 158 return None
159 159
160 160
161 161 class largefilesdirstate(dirstate.dirstate):
162 162 def __getitem__(self, key):
163 163 return super(largefilesdirstate, self).__getitem__(unixpath(key))
164 164
165 165 def set_tracked(self, f):
166 166 return super(largefilesdirstate, self).set_tracked(unixpath(f))
167 167
168 168 def set_untracked(self, f):
169 169 return super(largefilesdirstate, self).set_untracked(unixpath(f))
170 170
171 171 def normal(self, f, parentfiledata=None):
172 172 # It is not clear whether `parentfiledata` should be passed down or
173 173 # thrown away, so throw it away to stay on the safe side.
174 174 return super(largefilesdirstate, self).normal(unixpath(f))
175 175
176 176 def remove(self, f):
177 177 return super(largefilesdirstate, self).remove(unixpath(f))
178 178
179 179 def add(self, f):
180 180 return super(largefilesdirstate, self).add(unixpath(f))
181 181
182 182 def drop(self, f):
183 183 return super(largefilesdirstate, self).drop(unixpath(f))
184 184
185 185 def forget(self, f):
186 186 return super(largefilesdirstate, self).forget(unixpath(f))
187 187
188 188 def normallookup(self, f):
189 189 return super(largefilesdirstate, self).normallookup(unixpath(f))
190 190
191 191 def _ignore(self, f):
192 192 return False
193 193
194 194 def write(self, tr):
195 195 # (1) disable PENDING mode always
196 196 # (lfdirstate isn't yet managed as a part of the transaction)
197 197 # (2) avoid develwarn 'use dirstate.write with ....'
198 198 if tr:
199 199 tr.addbackup(b'largefiles/dirstate', location=b'plain')
200 200 super(largefilesdirstate, self).write(None)
201 201
202 202
203 203 def openlfdirstate(ui, repo, create=True):
204 204 """
205 205 Return a dirstate object that tracks largefiles: i.e. its root is
206 206 the repo root, but it is saved in .hg/largefiles/dirstate.
207 207 """
208 208 vfs = repo.vfs
209 209 lfstoredir = longname
210 210 opener = vfsmod.vfs(vfs.join(lfstoredir))
211 211 use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
212 212 lfdirstate = largefilesdirstate(
213 213 opener,
214 214 ui,
215 215 repo.root,
216 216 repo.dirstate._validate,
217 217 lambda: sparse.matcher(repo),
218 218 repo.nodeconstants,
219 219 use_dirstate_v2,
220 220 )
221 221
222 222 # If the largefiles dirstate does not exist, populate and create
223 223 # it. This ensures that we create it on the first meaningful
224 224 # largefiles operation in a new clone.
225 225 if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
226 226 try:
227 227 with repo.wlock(wait=False):
228 228 matcher = getstandinmatcher(repo)
229 229 standins = repo.dirstate.walk(
230 230 matcher, subrepos=[], unknown=False, ignored=False
231 231 )
232 232
233 233 if len(standins) > 0:
234 234 vfs.makedirs(lfstoredir)
235 235
236 236 with lfdirstate.changing_parents(repo):
237 237 for standin in standins:
238 238 lfile = splitstandin(standin)
239 lfdirstate.update_file(
239 lfdirstate.hacky_extension_update_file(
240 240 lfile,
241 241 p1_tracked=True,
242 242 wc_tracked=True,
243 243 possibly_dirty=True,
244 244 )
245 245 except error.LockError:
246 246 # Assume that whatever was holding the lock was important.
247 247 # If we were doing something important, we would already have
248 248 # either the lock or a largefile dirstate.
249 249 pass
250 250 return lfdirstate
251 251
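# A minimal usage sketch (illustrative; `repo` is assumed to be an open
# repository with the largefiles extension enabled):
#
#     lfdirstate = openlfdirstate(repo.ui, repo)
#     entry = lfdirstate.get_entry(b'path/to/largefile')
#
# Passing create=False skips populating .hg/largefiles/dirstate when it
# does not exist yet, as islfilesrepo() below does.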
252 252
253 253 def lfdirstatestatus(lfdirstate, repo):
254 254 pctx = repo[b'.']
255 255 match = matchmod.always()
256 256 unsure, s, mtime_boundary = lfdirstate.status(
257 257 match, subrepos=[], ignored=False, clean=False, unknown=False
258 258 )
259 259 modified, clean = s.modified, s.clean
260 260 wctx = repo[None]
261 261 for lfile in unsure:
262 262 try:
263 263 fctx = pctx[standin(lfile)]
264 264 except LookupError:
265 265 fctx = None
266 266 if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
267 267 modified.append(lfile)
268 268 else:
269 269 clean.append(lfile)
270 270 st = wctx[lfile].lstat()
271 271 mode = st.st_mode
272 272 size = st.st_size
273 273 mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
274 274 if mtime is not None:
275 275 cache_data = (mode, size, mtime)
276 276 lfdirstate.set_clean(lfile, cache_data)
277 277 return s
278 278
279 279
280 280 def listlfiles(repo, rev=None, matcher=None):
281 281 """return a list of largefiles in the working copy or the
282 282 specified changeset"""
283 283
284 284 if matcher is None:
285 285 matcher = getstandinmatcher(repo)
286 286
287 287 # ignore unknown files in working directory
288 288 return [
289 289 splitstandin(f)
290 290 for f in repo[rev].walk(matcher)
291 291 if rev is not None or repo.dirstate.get_entry(f).any_tracked
292 292 ]
293 293
294 294
295 295 def instore(repo, hash, forcelocal=False):
296 296 '''Return true if a largefile with the given hash exists in the store'''
297 297 return os.path.exists(storepath(repo, hash, forcelocal))
298 298
299 299
300 300 def storepath(repo, hash, forcelocal=False):
301 301 """Return the correct location in the repository largefiles store for a
302 302 file with the given hash."""
303 303 if not forcelocal and repo.shared():
304 304 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
305 305 return repo.vfs.join(longname, hash)
306 306
307 307
308 308 def findstorepath(repo, hash):
309 309 """Search through the local store path(s) to find the file for the given
310 310 hash. If the file is not found, its path in the primary store is returned.
311 311 The return value is a tuple of (path, exists(path)).
312 312 """
313 313 # For shared repos, the primary store is in the share source. But for
314 314 # backward compatibility, force a lookup in the local store if it wasn't
315 315 # found in the share source.
316 316 path = storepath(repo, hash, False)
317 317
318 318 if instore(repo, hash):
319 319 return (path, True)
320 320 elif repo.shared() and instore(repo, hash, True):
321 321 return storepath(repo, hash, True), True
322 322
323 323 return (path, False)
324 324
325 325
326 326 def copyfromcache(repo, hash, filename):
327 327 """Copy the specified largefile from the repo or system cache to
328 328 filename in the repository. Return true on success or false if the
329 329 file was not found in either cache (which should not happen:
330 330 this is meant to be called only after ensuring that the needed
331 331 largefile exists in the cache)."""
332 332 wvfs = repo.wvfs
333 333 path = findfile(repo, hash)
334 334 if path is None:
335 335 return False
336 336 wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
337 337 # The write may fail before the file is fully written, but we
338 338 # don't use atomic writes in the working copy.
339 339 with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
340 340 gothash = copyandhash(util.filechunkiter(srcfd), destfd)
341 341 if gothash != hash:
342 342 repo.ui.warn(
343 343 _(b'%s: data corruption in %s with hash %s\n')
344 344 % (filename, path, gothash)
345 345 )
346 346 wvfs.unlink(filename)
347 347 return False
348 348 return True
349 349
350 350
351 351 def copytostore(repo, ctx, file, fstandin):
352 352 wvfs = repo.wvfs
353 353 hash = readasstandin(ctx[fstandin])
354 354 if instore(repo, hash):
355 355 return
356 356 if wvfs.exists(file):
357 357 copytostoreabsolute(repo, wvfs.join(file), hash)
358 358 else:
359 359 repo.ui.warn(
360 360 _(b"%s: largefile %s not available from local store\n")
361 361 % (file, hash)
362 362 )
363 363
364 364
365 365 def copyalltostore(repo, node):
366 366 '''Copy all largefiles in a given revision to the store'''
367 367
368 368 ctx = repo[node]
369 369 for filename in ctx.files():
370 370 realfile = splitstandin(filename)
371 371 if realfile is not None and filename in ctx.manifest():
372 372 copytostore(repo, ctx, realfile, filename)
373 373
374 374
375 375 def copytostoreabsolute(repo, file, hash):
376 376 if inusercache(repo.ui, hash):
377 377 link(usercachepath(repo.ui, hash), storepath(repo, hash))
378 378 else:
379 379 util.makedirs(os.path.dirname(storepath(repo, hash)))
380 380 with open(file, b'rb') as srcf:
381 381 with util.atomictempfile(
382 382 storepath(repo, hash), createmode=repo.store.createmode
383 383 ) as dstf:
384 384 for chunk in util.filechunkiter(srcf):
385 385 dstf.write(chunk)
386 386 linktousercache(repo, hash)
387 387
388 388
389 389 def linktousercache(repo, hash):
390 390 """Link / copy the largefile with the specified hash from the store
391 391 to the cache."""
392 392 path = usercachepath(repo.ui, hash)
393 393 link(storepath(repo, hash), path)
394 394
395 395
396 396 def getstandinmatcher(repo, rmatcher=None):
397 397 '''Return a match object that applies rmatcher to the standin directory'''
398 398 wvfs = repo.wvfs
399 399 standindir = shortname
400 400
401 401 # no warnings about missing files or directories
402 402 badfn = lambda f, msg: None
403 403
404 404 if rmatcher and not rmatcher.always():
405 405 pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
406 406 if not pats:
407 407 pats = [wvfs.join(standindir)]
408 408 match = scmutil.match(repo[None], pats, badfn=badfn)
409 409 else:
410 410 # no patterns: relative to repo root
411 411 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
412 412 return match
413 413
414 414
415 415 def composestandinmatcher(repo, rmatcher):
416 416 """Return a matcher that accepts standins corresponding to the
417 417 files accepted by rmatcher. Pass the list of files in the matcher
418 418 as the paths specified by the user."""
419 419 smatcher = getstandinmatcher(repo, rmatcher)
420 420 isstandin = smatcher.matchfn
421 421
422 422 def composedmatchfn(f):
423 423 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
424 424
425 425 smatcher.matchfn = composedmatchfn
426 426
427 427 return smatcher
428 428
429 429
430 430 def standin(filename):
431 431 """Return the repo-relative path to the standin for the specified big
432 432 file."""
433 433 # Notes:
434 434 # 1) Some callers want an absolute path, but for instance addlargefiles
435 435 # needs it repo-relative so it can be passed to repo[None].add(). So
436 436 # leave it up to the caller to use repo.wjoin() to get an absolute path.
437 437 # 2) Join with '/' because that's what dirstate always uses, even on
438 438 # Windows. Change existing separator to '/' first in case we are
439 439 # passed filenames from an external source (like the command line).
440 440 return shortnameslash + util.pconvert(filename)
441 441
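# For example (follows directly from shortnameslash above):
#
#     standin(b'sub/big.bin')   -> b'.hglf/sub/big.bin'
#     standin(b'sub\\big.bin')  -> b'.hglf/sub/big.bin'  (separator fixed up)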
442 442
443 443 def isstandin(filename):
444 444 """Return true if filename is a big file standin. filename must be
445 445 in Mercurial's internal form (slash-separated)."""
446 446 return filename.startswith(shortnameslash)
447 447
448 448
449 449 def splitstandin(filename):
450 450 # Split on / because that's what dirstate always uses, even on Windows.
451 451 # Change local separator to / first just in case we are passed filenames
452 452 # from an external source (like the command line).
453 453 bits = util.pconvert(filename).split(b'/', 1)
454 454 if len(bits) == 2 and bits[0] == shortname:
455 455 return bits[1]
456 456 else:
457 457 return None
458 458
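# splitstandin() is the inverse of standin() above; for example:
#
#     splitstandin(b'.hglf/sub/big.bin') -> b'sub/big.bin'
#     splitstandin(b'sub/big.bin')       -> None  (not a standin)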
459 459
460 460 def updatestandin(repo, lfile, standin):
461 461 """Re-calculate hash value of lfile and write it into standin
462 462
463 463 This assumes that "lfutil.standin(lfile) == standin", for efficiency.
464 464 """
465 465 file = repo.wjoin(lfile)
466 466 if repo.wvfs.exists(lfile):
467 467 hash = hashfile(file)
468 468 executable = getexecutable(file)
469 469 writestandin(repo, standin, hash, executable)
470 470 else:
471 471 raise error.Abort(_(b'%s: file not found!') % lfile)
472 472
473 473
474 474 def readasstandin(fctx):
475 475 """read hex hash from given filectx of standin file
476 476
477 477 This encapsulates how "standin" data is stored into storage layer."""
478 478 return fctx.data().strip()
479 479
480 480
481 481 def writestandin(repo, standin, hash, executable):
482 482 '''write hash to <repo.root>/<standin>'''
483 483 repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')
484 484
485 485
486 486 def copyandhash(instream, outfile):
487 487 """Read bytes from instream (iterable) and write them to outfile,
488 488 computing the SHA-1 hash of the data along the way. Return the hash."""
489 489 hasher = hashutil.sha1(b'')
490 490 for data in instream:
491 491 hasher.update(data)
492 492 outfile.write(data)
493 493 return hex(hasher.digest())
494 494
495 495
496 496 def hashfile(file):
497 497 if not os.path.exists(file):
498 498 return b''
499 499 with open(file, b'rb') as fd:
500 500 return hexsha1(fd)
501 501
502 502
503 503 def getexecutable(filename):
504 504 mode = os.stat(filename).st_mode
505 505 return (
506 506 (mode & stat.S_IXUSR)
507 507 and (mode & stat.S_IXGRP)
508 508 and (mode & stat.S_IXOTH)
509 509 )
510 510
511 511
512 512 def urljoin(first, second, *arg):
513 513 def join(left, right):
514 514 if not left.endswith(b'/'):
515 515 left += b'/'
516 516 if right.startswith(b'/'):
517 517 right = right[1:]
518 518 return left + right
519 519
520 520 url = join(first, second)
521 521 for a in arg:
522 522 url = join(url, a)
523 523 return url
524 524
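# For example (hypothetical URL), slashes at each joint are normalized so
# exactly one separates the components:
#
#     urljoin(b'http://host/base/', b'/store', b'ab12')
#         -> b'http://host/base/store/ab12'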
525 525
526 526 def hexsha1(fileobj):
527 527 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
528 528 object data"""
529 529 h = hashutil.sha1()
530 530 for chunk in util.filechunkiter(fileobj):
531 531 h.update(chunk)
532 532 return hex(h.digest())
533 533
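# For example, with an in-memory file object (illustrative; the value is
# the well-known sha1 of empty input):
#
#     import io
#     hexsha1(io.BytesIO(b''))
#         -> b'da39a3ee5e6b4b0d3255bfef95601890afd80709'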
534 534
535 535 def httpsendfile(ui, filename):
536 536 return httpconnection.httpsendfile(ui, filename, b'rb')
537 537
538 538
539 539 def unixpath(path):
540 540 '''Return a version of path normalized for use with the lfdirstate.'''
541 541 return util.pconvert(os.path.normpath(path))
542 542
543 543
544 544 def islfilesrepo(repo):
545 545 '''Return true if the repo is a largefile repo.'''
546 546 if b'largefiles' in repo.requirements and any(
547 547 shortnameslash in f[1] for f in repo.store.datafiles()
548 548 ):
549 549 return True
550 550
551 551 return any(openlfdirstate(repo.ui, repo, False))
552 552
553 553
554 554 class storeprotonotcapable(Exception):
555 555 def __init__(self, storetypes):
556 556 self.storetypes = storetypes
557 557
558 558
559 559 def getstandinsstate(repo):
560 560 standins = []
561 561 matcher = getstandinmatcher(repo)
562 562 wctx = repo[None]
563 563 for standin in repo.dirstate.walk(
564 564 matcher, subrepos=[], unknown=False, ignored=False
565 565 ):
566 566 lfile = splitstandin(standin)
567 567 try:
568 568 hash = readasstandin(wctx[standin])
569 569 except IOError:
570 570 hash = None
571 571 standins.append((lfile, hash))
572 572 return standins
573 573
574 574
575 575 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
576 576 lfstandin = standin(lfile)
577 577 if lfstandin not in repo.dirstate:
578 578 lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
579 579 else:
580 580 entry = repo.dirstate.get_entry(lfstandin)
581 581 lfdirstate.update_file(
582 582 lfile,
583 583 wc_tracked=entry.tracked,
584 584 p1_tracked=entry.p1_tracked,
585 585 p2_info=entry.p2_info,
586 586 possibly_dirty=True,
587 587 )
588 588
589 589
590 590 def markcommitted(orig, ctx, node):
591 591 repo = ctx.repo()
592 592
593 593 lfdirstate = openlfdirstate(repo.ui, repo)
594 594 with lfdirstate.changing_parents(repo):
595 595 orig(node)
596 596
597 597 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
598 598 # because files coming from the 2nd parent are omitted in the latter.
599 599 #
600 600 # The former should be used to get targets of "synclfdirstate",
601 601 # because such files:
602 602 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
603 603 # - have to be marked as "n" after commit, but
604 604 # - aren't listed in "repo[node].files()"
605 605
606 606 for f in ctx.files():
607 607 lfile = splitstandin(f)
608 608 if lfile is not None:
609 609 synclfdirstate(repo, lfdirstate, lfile, False)
610 610 lfdirstate.write(repo.currenttransaction())
611 611
612 612 # As part of committing, copy all of the largefiles into the cache.
613 613 #
614 614 # Using "node" instead of "ctx" implies additional "repo[node]"
615 615 # lookup while copyalltostore(), but can omit redundant check for
616 616 # files comming from the 2nd parent, which should exist in store
617 617 # at merging.
618 618 copyalltostore(repo, node)
619 619
620 620
621 621 def getlfilestoupdate(oldstandins, newstandins):
622 622 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
623 623 filelist = []
624 624 for f in changedstandins:
625 625 if f[0] not in filelist:
626 626 filelist.append(f[0])
627 627 return filelist
628 628
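# For example, with hypothetical (lfile, hash) pairs as produced by
# getstandinsstate() above, only names whose pair changed are returned:
#
#     old = [(b'a.bin', b'11aa'), (b'b.bin', b'22bb')]
#     new = [(b'a.bin', b'33cc'), (b'b.bin', b'22bb')]
#     getlfilestoupdate(old, new) -> [b'a.bin']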
629 629
630 630 def getlfilestoupload(repo, missing, addfunc):
631 631 makeprogress = repo.ui.makeprogress
632 632 with makeprogress(
633 633 _(b'finding outgoing largefiles'),
634 634 unit=_(b'revisions'),
635 635 total=len(missing),
636 636 ) as progress:
637 637 for i, n in enumerate(missing):
638 638 progress.update(i)
639 639 parents = [p for p in repo[n].parents() if p != repo.nullid]
640 640
641 641 with lfstatus(repo, value=False):
642 642 ctx = repo[n]
643 643
644 644 files = set(ctx.files())
645 645 if len(parents) == 2:
646 646 mc = ctx.manifest()
647 647 mp1 = ctx.p1().manifest()
648 648 mp2 = ctx.p2().manifest()
649 649 for f in mp1:
650 650 if f not in mc:
651 651 files.add(f)
652 652 for f in mp2:
653 653 if f not in mc:
654 654 files.add(f)
655 655 for f in mc:
656 656 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
657 657 files.add(f)
658 658 for fn in files:
659 659 if isstandin(fn) and fn in ctx:
660 660 addfunc(fn, readasstandin(ctx[fn]))
661 661
662 662
663 663 def updatestandinsbymatch(repo, match):
664 664 """Update standins in the working directory according to specified match
665 665
666 666 This returns (possibly modified) ``match`` object to be used for
667 667 subsequent commit process.
668 668 """
669 669
670 670 ui = repo.ui
671 671
672 672 # Case 1: user calls commit with no specific files or
673 673 # include/exclude patterns: refresh and commit all files that
674 674 # are "dirty".
675 675 if match is None or match.always():
676 676 # Spend a bit of time here to get a list of files we know
677 677 # are modified so we can compare only against those.
678 678 # Otherwise, updating all standins can cost a lot of time
679 679 # (several seconds) if the largefiles are large.
681 681 lfdirstate = openlfdirstate(ui, repo)
682 682 dirtymatch = matchmod.always()
683 683 unsure, s, mtime_boundary = lfdirstate.status(
684 684 dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
685 685 )
686 686 modifiedfiles = unsure + s.modified + s.added + s.removed
687 687 lfiles = listlfiles(repo)
688 688 # this only loops through largefiles that exist (not
689 689 # removed/renamed)
690 690 for lfile in lfiles:
691 691 if lfile in modifiedfiles:
692 692 fstandin = standin(lfile)
693 693 if repo.wvfs.exists(fstandin):
694 694 # this handles the case where a rebase is being
695 695 # performed and the working copy is not updated
696 696 # yet.
697 697 if repo.wvfs.exists(lfile):
698 698 updatestandin(repo, lfile, fstandin)
699 699
700 700 return match
701 701
702 702 lfiles = listlfiles(repo)
703 703 match._files = repo._subdirlfs(match.files(), lfiles)
704 704
705 705 # Case 2: user calls commit with specified patterns: refresh
706 706 # any matching big files.
707 707 smatcher = composestandinmatcher(repo, match)
708 708 standins = repo.dirstate.walk(
709 709 smatcher, subrepos=[], unknown=False, ignored=False
710 710 )
711 711
712 712 # No matching big files: get out of the way and pass control to
713 713 # the usual commit() method.
714 714 if not standins:
715 715 return match
716 716
717 717 # Refresh all matching big files. It's possible that the
718 718 # commit will end up failing, in which case the big files will
719 719 # stay refreshed. No harm done: the user modified them and
720 720 # asked to commit them, so sooner or later we're going to
721 721 # refresh the standins. Might as well leave them refreshed.
722 722 lfdirstate = openlfdirstate(ui, repo)
723 723 for fstandin in standins:
724 724 lfile = splitstandin(fstandin)
725 725 if lfdirstate.get_entry(lfile).tracked:
726 726 updatestandin(repo, lfile, fstandin)
727 727
728 728 # Cook up a new matcher that only matches regular files or
729 729 # standins corresponding to the big files requested by the
730 730 # user. Have to modify _files to prevent commit() from
731 731 # complaining "not tracked" for big files.
732 732 match = copy.copy(match)
733 733 origmatchfn = match.matchfn
734 734
735 735 # Check both the list of largefiles and the list of
736 736 # standins because if a largefile was removed, it
737 737 # won't be in the list of largefiles at this point
738 738 match._files += sorted(standins)
739 739
740 740 actualfiles = []
741 741 for f in match._files:
742 742 fstandin = standin(f)
743 743
744 744 # For largefiles, only one of the normal file and the standin should
745 745 # be committed (except when one of them is a removal). In the case of
746 746 # a standin removal, drop the normal file if it is unknown to the
747 747 # dirstate. Thus, skip plain largefile names but keep the standin.
748 748 if f in lfiles or fstandin in standins:
749 749 if not repo.dirstate.get_entry(fstandin).removed:
750 750 if not repo.dirstate.get_entry(f).removed:
751 751 continue
752 752 elif not repo.dirstate.get_entry(f).any_tracked:
753 753 continue
754 754
755 755 actualfiles.append(f)
756 756 match._files = actualfiles
757 757
758 758 def matchfn(f):
759 759 if origmatchfn(f):
760 760 return f not in lfiles
761 761 else:
762 762 return f in standins
763 763
764 764 match.matchfn = matchfn
765 765
766 766 return match
767 767
768 768
769 769 class automatedcommithook:
770 770 """Stateful hook to update standins at the 1st commit of resuming
771 771
772 772 For efficiency, updating standins in the working directory should
773 773 be avoided while automated committing (like rebase, transplant and
774 774 so on), because they should be updated before committing.
775 775
776 776 But the 1st commit of resuming automated committing (e.g. ``rebase
777 777 --continue``) should update them, because largefiles may be
778 778 modified manually.
779 779 """
780 780
781 781 def __init__(self, resuming):
782 782 self.resuming = resuming
783 783
784 784 def __call__(self, repo, match):
785 785 if self.resuming:
786 786 self.resuming = False # avoids updating at subsequent commits
787 787 return updatestandinsbymatch(repo, match)
788 788 else:
789 789 return match
790 790
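# A minimal usage sketch (illustrative): a resuming automated command
# installs the hook once, and only its first commit pays the cost of
# refreshing standins:
#
#     hook = automatedcommithook(resuming=True)
#     match = hook(repo, match)  # 1st call: runs updatestandinsbymatch()
#     match = hook(repo, match)  # later calls: return match unchanged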
791 791
792 792 def getstatuswriter(ui, repo, forcibly=None):
793 793 """Return the function to write largefiles specific status out
794 794
795 795 If ``forcibly`` is ``None``, this returns the last element of
796 796 ``repo._lfstatuswriters`` as "default" writer function.
797 797
798 798 Otherwise, this returns a function that always writes the status
799 799 out (or always ignores it, if ``not forcibly``).
800 800 """
801 801 if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
802 802 return repo._lfstatuswriters[-1]
803 803 else:
804 804 if forcibly:
805 805 return ui.status # forcibly WRITE OUT
806 806 else:
807 807 return lambda *msg, **opts: None # forcibly IGNORE