##// END OF EJS Templates
largefile: use `update_file` instead of `normal` in `synclfdirstate`...
marmoute -
r48516:2d0717b1 default
parent child Browse files
Show More
@@ -1,791 +1,791 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import contextlib
13 13 import copy
14 14 import os
15 15 import stat
16 16
17 17 from mercurial.i18n import _
18 18 from mercurial.node import hex
19 19 from mercurial.pycompat import open
20 20
21 21 from mercurial import (
22 22 dirstate,
23 23 encoding,
24 24 error,
25 25 httpconnection,
26 26 match as matchmod,
27 27 pycompat,
28 28 requirements,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 vfs as vfsmod,
33 33 )
34 34 from mercurial.utils import hashutil
35 35
36 36 shortname = b'.hglf'
37 37 shortnameslash = shortname + b'/'
38 38 longname = b'largefiles'
39 39
40 40 # -- Private worker functions ------------------------------------------
41 41
42 42
@contextlib.contextmanager
def lfstatus(repo, value=True):
    """Temporarily set ``repo.lfstatus`` to ``value``, restoring on exit.

    The previous value (``False`` when the attribute is absent) is put
    back even if the managed block raises.
    """
    previous = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = previous
51 51
52 52
def getminsize(ui, assumelfiles, opt, default=10):
    """Return the minimum largefile size as a float.

    ``opt`` (the command-line value) wins; otherwise the
    ``largefiles.minsize`` config is consulted when ``assumelfiles``
    is set.  Raises error.Abort for a non-numeric or missing value.
    """
    size = opt
    if assumelfiles and not size:
        size = ui.config(longname, b'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % size
            )
    if size is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return size
67 67
68 68
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
        return
    except OSError:
        pass
    # hardlink failed (e.g. cross-device); fall back on an atomic copy
    with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
        for chunk in util.filechunkiter(srcf):
            dstf.write(chunk)
    os.chmod(dest, os.stat(src).st_mode)
80 80
81 81
def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a
    file with the given hash.

    This cache is used for sharing of largefiles across repositories -
    both to preserve download bandwidth and storage space.
    """
    cachedir = _usercachedir(ui)
    return os.path.join(cachedir, hash)
88 88
89 89
def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.

    Resolution order: the ``<name>.usercache`` config path, then a
    platform-specific default derived from the environment.  Raises
    error.Abort (with a platform-appropriate hint) when no location
    can be determined.
    '''
    # explicit configuration always wins
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        # prefer the local (non-roaming) profile when available
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        # follow the XDG base-directory convention, then fall back to ~/.cache
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
138 138
139 139
def inusercache(ui, hash):
    """Return True if the largefile ``hash`` exists in the user cache."""
    return os.path.exists(usercachepath(ui, hash))
143 143
144 144
def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.

    As a side effect, the file might be linked from the user cache.
    Return None if the file can't be found locally.
    """
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    if not inusercache(repo.ui, hash):
        return None
    # present only in the user cache: link it into the store first
    repo.ui.note(_(b'found %s in system cache\n') % hash)
    path = storepath(repo, hash)
    link(usercachepath(repo.ui, hash), path)
    return path
159 159
160 160
class largefilesdirstate(dirstate.dirstate):
    """A dirstate subclass used for the largefiles-specific dirstate.

    Every path argument is normalized with ``unixpath()`` before being
    handed to the base class, so callers may pass OS-native paths.
    """

    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        # largefiles are never ignored
        return False

    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
199 199
200 200
def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    When ``create`` is true and no lfdirstate file exists yet, one is
    seeded from the standins currently tracked by the main dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        # normallookup marks each largefile as possibly dirty so the
        # first status check re-verifies its content
        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
236 236
237 237
def lfdirstatestatus(lfdirstate, repo):
    """Resolve 'unsure' lfdirstate entries against '.' and return the status.

    Each unsure file whose working-copy hash differs from the standin
    recorded in the parent is appended to ``modified``; otherwise it is
    appended to ``clean`` and marked clean in the lfdirstate as well.
    """
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            # standin absent from the parent: the file must be modified
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.set_clean(lfile)
    return s
256 256
257 257
def listlfiles(repo, rev=None, matcher=None):
    """Return a list of largefiles in the working copy or the
    specified changeset."""
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # for the working directory (rev is None), skip unknown standins
        if rev is None and repo.dirstate[f] == b'?':
            continue
        lfiles.append(splitstandin(f))
    return lfiles
271 271
272 272
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    path = storepath(repo, hash, forcelocal)
    return os.path.exists(path)
276 276
277 277
def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash.

    For shared repositories the primary store lives at the share
    source unless ``forcelocal`` is set.
    """
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)
284 284
285 285
def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is
    returned.  The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    primary = storepath(repo, hash, False)

    if instore(repo, hash):
        return (primary, True)
    if repo.shared() and instore(repo, hash, True):
        return (storepath(repo, hash, True), True)
    return (primary, False)
302 302
303 303
def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        # the cached blob is corrupt: warn, drop the bad copy and fail
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True
327 327
328 328
def copytostore(repo, ctx, file, fstandin):
    """Copy ``file`` into the store keyed by its standin's hash, unless the
    blob is already stored or the working copy file is missing."""
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if not wvfs.exists(file):
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )
        return
    copytostoreabsolute(repo, wvfs.join(file), hash)
341 341
342 342
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        lfile = splitstandin(filename)
        # only consider standins still present in this revision's manifest
        if lfile is None or filename not in ctx.manifest():
            continue
        copytostore(repo, ctx, lfile, filename)
351 351
352 352
def copytostoreabsolute(repo, file, hash):
    """Copy ``file`` (an absolute path) into the store under ``hash``.

    Prefers hardlinking from the user cache when the blob is already
    there; otherwise copies atomically and then links the new store
    entry back into the user cache.
    """
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            # atomictempfile ensures readers never observe a partial blob
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
365 365
366 366
def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the user cache."""
    target = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), target)
372 372
373 373
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # translate the caller's file patterns into the standin directory
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            # pattern-only matcher (no explicit files): match the whole dir
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match
391 391
392 392
def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        # accept only standins whose largefile the user's matcher accepts
        if not isstandin(f):
            return False
        return rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn
    return smatcher
406 406
407 407
def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    normalized = util.pconvert(filename)
    return shortnameslash + normalized
419 419
420 420
def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    prefix = shortnameslash
    return filename[: len(prefix)] == prefix
425 425
426 426
def splitstandin(filename):
    """Return the largefile name for a standin path, or None.

    Split on / because that's what dirstate always uses, even on Windows.
    Change local separator to / first just in case we are passed filenames
    from an external source (like the command line).
    """
    prefix, sep, rest = util.pconvert(filename).partition(b'/')
    if sep and prefix == shortname:
        return rest
    return None
436 436
437 437
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    if not repo.wvfs.exists(lfile):
        raise error.Abort(_(b'%s: file not found!') % lfile)
    abspath = repo.wjoin(lfile)
    writestandin(repo, standin, hashfile(abspath), getexecutable(abspath))
450 450
451 451
def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    data = fctx.data()
    return data.strip()
457 457
458 458
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = b'x' if executable else b''
    repo.wwrite(standin, hash + b'\n', flags)
462 462
463 463
def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for chunk in instream:
        hasher.update(chunk)
        outfile.write(chunk)
    return hex(hasher.digest())
472 472
473 473
def hashfile(file):
    """Return the hex SHA-1 of ``file``'s contents, or b'' if it is absent."""
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)
479 479
480 480
def getexecutable(filename):
    """Return a truthy value when ``filename`` is executable by user,
    group AND other (all three x bits set)."""
    mode = os.stat(filename).st_mode
    xbits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    return (mode & xbits) == xbits
488 488
489 489
def urljoin(first, second, *arg):
    """Join URL components with exactly one b'/' between each pair."""

    def _join(left, right):
        # normalize the boundary: left gains a trailing slash,
        # right loses a leading one
        if not left.endswith(b'/'):
            left += b'/'
        return left + (right[1:] if right.startswith(b'/') else right)

    result = _join(first, second)
    for piece in arg:
        result = _join(result, piece)
    return result
502 502
503 503
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    digest = hashutil.sha1()
    for block in util.filechunkiter(fileobj):
        digest.update(block)
    return hex(digest.digest())
511 511
512 512
def httpsendfile(ui, filename):
    """Wrap ``filename`` (opened in binary mode) for sending over HTTP."""
    return httpconnection.httpsendfile(ui, filename, b'rb')
515 515
516 516
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
520 520
521 521
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements:
        # the requirement alone is not enough: also need stored standins
        for entry in repo.store.datafiles():
            if shortnameslash in entry[1]:
                return True

    # fall back to the lfdirstate: any tracked largefile counts
    return any(openlfdirstate(repo.ui, repo, False))
530 530
531 531
class storeprotonotcapable(Exception):
    """Raised when no store supports any of the required protocols.

    ``storetypes`` carries the store types that were required.
    """

    def __init__(self, storetypes):
        self.storetypes = storetypes
535 535
536 536
def getstandinsstate(repo):
    """Return a list of (largefile, expected hash) pairs for every standin
    tracked by the main dirstate; hash is None when the standin is
    unreadable."""
    wctx = repo[None]
    matcher = getstandinmatcher(repo)
    walked = repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    )
    result = []
    for fstandin in walked:
        name = splitstandin(fstandin)
        try:
            expected = readasstandin(wctx[fstandin])
        except IOError:
            expected = None
        result.append((name, expected))
    return result
551 551
552 552
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    """Sync the lfdirstate entry for ``lfile`` with the state of its
    standin in the main dirstate.

    When ``normallookup`` is true, even a clean-looking entry is demoted
    to "possibly dirty" so the next status run rechecks the contents.
    """
    # NOTE: the web-diff scrape of this function contained both the old
    # `lfdirstate.normal(lfile)` line and its replacement; per changeset
    # r48516 ("use `update_file` instead of `normal`") only the
    # `update_file` call belongs in the reconstructed code.
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        # standin is not tracked: forget the largefile as well
        lfdirstate.drop(lfile)
    else:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat.state, stat.mtime
        if state == b'n':
            if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
                # state 'n' doesn't ensure 'clean' in this case
                lfdirstate.normallookup(lfile)
            else:
                lfdirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=True
                )
        elif state == b'm':
            lfdirstate.normallookup(lfile)
        elif state == b'r':
            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
        elif state == b'a':
            lfdirstate.add(lfile)
572 572
573 573
def markcommitted(orig, ctx, node):
    """Post-commit hook: re-sync the lfdirstate and fill the store.

    ``orig`` is the wrapped markcommitted implementation and ``node``
    the newly committed changeset.
    """
    repo = ctx.repo()

    lfdirstate = openlfdirstate(repo.ui, repo)
    with lfdirstate.parentchange():
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files coming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)
603 603
604 604
def getlfilestoupdate(oldstandins, newstandins):
    """Return the deduplicated names of files whose (name, hash) entry
    differs between the two standin-state lists."""
    changed = set(oldstandins) ^ set(newstandins)
    seen = set()
    filelist = []
    for entry in changed:
        name = entry[0]
        if name not in seen:
            seen.add(name)
            filelist.append(name)
    return filelist
612 612
613 613
def getlfilestoupload(repo, missing, addfunc):
    """Call ``addfunc(standin, hash)`` for every largefile standin
    referenced by the revisions in ``missing``.

    For merge commits, files that differ from either parent manifest are
    also considered, since ``ctx.files()`` alone would omit some of them.
    """
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            # disable lfstatus so the context reports plain file data
            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                # merge: add files removed against, or differing from,
                # either parent
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))
645 645
646 646
def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        # skip largefiles marked as removed in the lfdirstate
        if lfdirstate[lfile] != b'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != b'r':
                if repo.dirstate[f] != b'r':
                    continue
            elif repo.dirstate[f] == b'?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        # accept plain files the user matched, plus the standins we added
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
751 751
752 752
class automatedcommithook(object):
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        # True only until the first commit after resuming
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        self.resuming = False  # avoids updating at subsequent commits
        return updatestandinsbymatch(repo, match)
774 774
775 775
def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        # largefiles is active: honour the writer stack
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE
General Comments 0
You need to be logged in to leave comments. Login now