largefile: use `update_file` for `synclfdirstate` "m" case...
marmoute
r48525:1f3a87a7 default
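
This commit replaces the legacy `lfdirstate.normallookup(lfile)` call with the newer `update_file` dirstate API for standins recorded in the merged ("m") state. A minimal before/after sketch of the affected branch of `synclfdirstate` (names exactly as in the hunk below):

    # before: legacy dirstate API, merge state only implied
    elif state == b'm':
        lfdirstate.normallookup(lfile)

    # after: explicit update_file call that records the merge
    elif state == b'm':
        lfdirstate.update_file(
            lfile, p1_tracked=True, wc_tracked=True, merged=True
        )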
@@ -1,796 +1,798 @@
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import contextlib
13 13 import copy
14 14 import os
15 15 import stat
16 16
17 17 from mercurial.i18n import _
18 18 from mercurial.node import hex
19 19 from mercurial.pycompat import open
20 20
21 21 from mercurial import (
22 22 dirstate,
23 23 encoding,
24 24 error,
25 25 httpconnection,
26 26 match as matchmod,
27 27 pycompat,
28 28 requirements,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 vfs as vfsmod,
33 33 )
34 34 from mercurial.utils import hashutil
35 35
36 36 shortname = b'.hglf'
37 37 shortnameslash = shortname + b'/'
38 38 longname = b'largefiles'
39 39
40 40 # -- Private worker functions ------------------------------------------
41 41
42 42
43 43 @contextlib.contextmanager
44 44 def lfstatus(repo, value=True):
45 45 oldvalue = getattr(repo, 'lfstatus', False)
46 46 repo.lfstatus = value
47 47 try:
48 48 yield
49 49 finally:
50 50 repo.lfstatus = oldvalue
51 51
52 52
53 53 def getminsize(ui, assumelfiles, opt, default=10):
54 54 lfsize = opt
55 55 if not lfsize and assumelfiles:
56 56 lfsize = ui.config(longname, b'minsize', default=default)
57 57 if lfsize:
58 58 try:
59 59 lfsize = float(lfsize)
60 60 except ValueError:
61 61 raise error.Abort(
62 62 _(b'largefiles: size must be a number (not %s)\n') % lfsize
63 63 )
64 64 if lfsize is None:
65 65 raise error.Abort(_(b'minimum size for largefiles must be specified'))
66 66 return lfsize
67 67
68 68
69 69 def link(src, dest):
70 70 """Try to create hardlink - if that fails, efficiently make a copy."""
71 71 util.makedirs(os.path.dirname(dest))
72 72 try:
73 73 util.oslink(src, dest)
74 74 except OSError:
75 75 # if hardlinks fail, fallback on atomic copy
76 76 with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
77 77 for chunk in util.filechunkiter(srcf):
78 78 dstf.write(chunk)
79 79 os.chmod(dest, os.stat(src).st_mode)
80 80
81 81
82 82 def usercachepath(ui, hash):
83 83 """Return the correct location in the "global" largefiles cache for a file
84 84 with the given hash.
85 85 This cache is used for sharing of largefiles across repositories - both
86 86 to preserve download bandwidth and storage space."""
87 87 return os.path.join(_usercachedir(ui), hash)
88 88
89 89
90 90 def _usercachedir(ui, name=longname):
91 91 '''Return the location of the "global" largefiles cache.'''
92 92 path = ui.configpath(name, b'usercache')
93 93 if path:
94 94 return path
95 95
96 96 hint = None
97 97
98 98 if pycompat.iswindows:
99 99 appdata = encoding.environ.get(
100 100 b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
101 101 )
102 102 if appdata:
103 103 return os.path.join(appdata, name)
104 104
105 105 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
106 106 b"LOCALAPPDATA",
107 107 b"APPDATA",
108 108 name,
109 109 )
110 110 elif pycompat.isdarwin:
111 111 home = encoding.environ.get(b'HOME')
112 112 if home:
113 113 return os.path.join(home, b'Library', b'Caches', name)
114 114
115 115 hint = _(b"define %s in the environment, or set %s.usercache") % (
116 116 b"HOME",
117 117 name,
118 118 )
119 119 elif pycompat.isposix:
120 120 path = encoding.environ.get(b'XDG_CACHE_HOME')
121 121 if path:
122 122 return os.path.join(path, name)
123 123 home = encoding.environ.get(b'HOME')
124 124 if home:
125 125 return os.path.join(home, b'.cache', name)
126 126
127 127 hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
128 128 b"XDG_CACHE_HOME",
129 129 b"HOME",
130 130 name,
131 131 )
132 132 else:
133 133 raise error.Abort(
134 134 _(b'unknown operating system: %s\n') % pycompat.osname
135 135 )
136 136
137 137 raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
138 138
139 139
140 140 def inusercache(ui, hash):
141 141 path = usercachepath(ui, hash)
142 142 return os.path.exists(path)
143 143
144 144
145 145 def findfile(repo, hash):
146 146 """Return store path of the largefile with the specified hash.
147 147 As a side effect, the file might be linked from user cache.
148 148 Return None if the file can't be found locally."""
149 149 path, exists = findstorepath(repo, hash)
150 150 if exists:
151 151 repo.ui.note(_(b'found %s in store\n') % hash)
152 152 return path
153 153 elif inusercache(repo.ui, hash):
154 154 repo.ui.note(_(b'found %s in system cache\n') % hash)
155 155 path = storepath(repo, hash)
156 156 link(usercachepath(repo.ui, hash), path)
157 157 return path
158 158 return None
159 159
160 160
161 161 class largefilesdirstate(dirstate.dirstate):
162 162 def __getitem__(self, key):
163 163 return super(largefilesdirstate, self).__getitem__(unixpath(key))
164 164
165 165 def set_tracked(self, f):
166 166 return super(largefilesdirstate, self).set_tracked(unixpath(f))
167 167
168 168 def set_untracked(self, f):
169 169 return super(largefilesdirstate, self).set_untracked(unixpath(f))
170 170
171 171 def normal(self, f, parentfiledata=None):
172 172 # not sure if we should pass the `parentfiledata` down or throw it
173 173 # away. So throwing it away to stay on the safe side.
174 174 return super(largefilesdirstate, self).normal(unixpath(f))
175 175
176 176 def remove(self, f):
177 177 return super(largefilesdirstate, self).remove(unixpath(f))
178 178
179 179 def add(self, f):
180 180 return super(largefilesdirstate, self).add(unixpath(f))
181 181
182 182 def drop(self, f):
183 183 return super(largefilesdirstate, self).drop(unixpath(f))
184 184
185 185 def forget(self, f):
186 186 return super(largefilesdirstate, self).forget(unixpath(f))
187 187
188 188 def normallookup(self, f):
189 189 return super(largefilesdirstate, self).normallookup(unixpath(f))
190 190
191 191 def _ignore(self, f):
192 192 return False
193 193
194 194 def write(self, tr=False):
195 195 # (1) disable PENDING mode always
196 196 # (lfdirstate isn't yet managed as a part of the transaction)
197 197 # (2) avoid develwarn 'use dirstate.write with ....'
198 198 super(largefilesdirstate, self).write(None)
199 199
200 200
201 201 def openlfdirstate(ui, repo, create=True):
202 202 """
203 203 Return a dirstate object that tracks largefiles: i.e. its root is
204 204 the repo root, but it is saved in .hg/largefiles/dirstate.
205 205 """
206 206 vfs = repo.vfs
207 207 lfstoredir = longname
208 208 opener = vfsmod.vfs(vfs.join(lfstoredir))
209 209 use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
210 210 lfdirstate = largefilesdirstate(
211 211 opener,
212 212 ui,
213 213 repo.root,
214 214 repo.dirstate._validate,
215 215 lambda: sparse.matcher(repo),
216 216 repo.nodeconstants,
217 217 use_dirstate_v2,
218 218 )
219 219
220 220 # If the largefiles dirstate does not exist, populate and create
221 221 # it. This ensures that we create it on the first meaningful
222 222 # largefiles operation in a new clone.
223 223 if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
224 224 matcher = getstandinmatcher(repo)
225 225 standins = repo.dirstate.walk(
226 226 matcher, subrepos=[], unknown=False, ignored=False
227 227 )
228 228
229 229 if len(standins) > 0:
230 230 vfs.makedirs(lfstoredir)
231 231
232 232 with lfdirstate.parentchange():
233 233 for standin in standins:
234 234 lfile = splitstandin(standin)
235 235 lfdirstate.update_file(
236 236 lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
237 237 )
238 238 return lfdirstate
239 239
240 240
241 241 def lfdirstatestatus(lfdirstate, repo):
242 242 pctx = repo[b'.']
243 243 match = matchmod.always()
244 244 unsure, s = lfdirstate.status(
245 245 match, subrepos=[], ignored=False, clean=False, unknown=False
246 246 )
247 247 modified, clean = s.modified, s.clean
248 248 for lfile in unsure:
249 249 try:
250 250 fctx = pctx[standin(lfile)]
251 251 except LookupError:
252 252 fctx = None
253 253 if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
254 254 modified.append(lfile)
255 255 else:
256 256 clean.append(lfile)
257 257 lfdirstate.set_clean(lfile)
258 258 return s
259 259
260 260
261 261 def listlfiles(repo, rev=None, matcher=None):
262 262 """return a list of largefiles in the working copy or the
263 263 specified changeset"""
264 264
265 265 if matcher is None:
266 266 matcher = getstandinmatcher(repo)
267 267
268 268 # ignore unknown files in working directory
269 269 return [
270 270 splitstandin(f)
271 271 for f in repo[rev].walk(matcher)
272 272 if rev is not None or repo.dirstate[f] != b'?'
273 273 ]
274 274
275 275
276 276 def instore(repo, hash, forcelocal=False):
277 277 '''Return true if a largefile with the given hash exists in the store'''
278 278 return os.path.exists(storepath(repo, hash, forcelocal))
279 279
280 280
281 281 def storepath(repo, hash, forcelocal=False):
282 282 """Return the correct location in the repository largefiles store for a
283 283 file with the given hash."""
284 284 if not forcelocal and repo.shared():
285 285 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
286 286 return repo.vfs.join(longname, hash)
287 287
288 288
289 289 def findstorepath(repo, hash):
290 290 """Search through the local store path(s) to find the file for the given
291 291 hash. If the file is not found, its path in the primary store is returned.
292 292 The return value is a tuple of (path, exists(path)).
293 293 """
294 294 # For shared repos, the primary store is in the share source. But for
295 295 # backward compatibility, force a lookup in the local store if it wasn't
296 296 # found in the share source.
297 297 path = storepath(repo, hash, False)
298 298
299 299 if instore(repo, hash):
300 300 return (path, True)
301 301 elif repo.shared() and instore(repo, hash, True):
302 302 return storepath(repo, hash, True), True
303 303
304 304 return (path, False)
305 305
306 306
307 307 def copyfromcache(repo, hash, filename):
308 308 """Copy the specified largefile from the repo or system cache to
309 309 filename in the repository. Return true on success or false if the
310 310 file was not found in either cache (which should not happen:
311 311 this is meant to be called only after ensuring that the needed
312 312 largefile exists in the cache)."""
313 313 wvfs = repo.wvfs
314 314 path = findfile(repo, hash)
315 315 if path is None:
316 316 return False
317 317 wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
318 318 # The write may fail before the file is fully written, but we
319 319 # don't use atomic writes in the working copy.
320 320 with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
321 321 gothash = copyandhash(util.filechunkiter(srcfd), destfd)
322 322 if gothash != hash:
323 323 repo.ui.warn(
324 324 _(b'%s: data corruption in %s with hash %s\n')
325 325 % (filename, path, gothash)
326 326 )
327 327 wvfs.unlink(filename)
328 328 return False
329 329 return True
330 330
331 331
332 332 def copytostore(repo, ctx, file, fstandin):
333 333 wvfs = repo.wvfs
334 334 hash = readasstandin(ctx[fstandin])
335 335 if instore(repo, hash):
336 336 return
337 337 if wvfs.exists(file):
338 338 copytostoreabsolute(repo, wvfs.join(file), hash)
339 339 else:
340 340 repo.ui.warn(
341 341 _(b"%s: largefile %s not available from local store\n")
342 342 % (file, hash)
343 343 )
344 344
345 345
346 346 def copyalltostore(repo, node):
347 347 '''Copy all largefiles in a given revision to the store'''
348 348
349 349 ctx = repo[node]
350 350 for filename in ctx.files():
351 351 realfile = splitstandin(filename)
352 352 if realfile is not None and filename in ctx.manifest():
353 353 copytostore(repo, ctx, realfile, filename)
354 354
355 355
356 356 def copytostoreabsolute(repo, file, hash):
357 357 if inusercache(repo.ui, hash):
358 358 link(usercachepath(repo.ui, hash), storepath(repo, hash))
359 359 else:
360 360 util.makedirs(os.path.dirname(storepath(repo, hash)))
361 361 with open(file, b'rb') as srcf:
362 362 with util.atomictempfile(
363 363 storepath(repo, hash), createmode=repo.store.createmode
364 364 ) as dstf:
365 365 for chunk in util.filechunkiter(srcf):
366 366 dstf.write(chunk)
367 367 linktousercache(repo, hash)
368 368
369 369
370 370 def linktousercache(repo, hash):
371 371 """Link / copy the largefile with the specified hash from the store
372 372 to the cache."""
373 373 path = usercachepath(repo.ui, hash)
374 374 link(storepath(repo, hash), path)
375 375
376 376
377 377 def getstandinmatcher(repo, rmatcher=None):
378 378 '''Return a match object that applies rmatcher to the standin directory'''
379 379 wvfs = repo.wvfs
380 380 standindir = shortname
381 381
382 382 # no warnings about missing files or directories
383 383 badfn = lambda f, msg: None
384 384
385 385 if rmatcher and not rmatcher.always():
386 386 pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
387 387 if not pats:
388 388 pats = [wvfs.join(standindir)]
389 389 match = scmutil.match(repo[None], pats, badfn=badfn)
390 390 else:
391 391 # no patterns: relative to repo root
392 392 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
393 393 return match
394 394
395 395
396 396 def composestandinmatcher(repo, rmatcher):
397 397 """Return a matcher that accepts standins corresponding to the
398 398 files accepted by rmatcher. Pass the list of files in the matcher
399 399 as the paths specified by the user."""
400 400 smatcher = getstandinmatcher(repo, rmatcher)
401 401 isstandin = smatcher.matchfn
402 402
403 403 def composedmatchfn(f):
404 404 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
405 405
406 406 smatcher.matchfn = composedmatchfn
407 407
408 408 return smatcher
409 409
410 410
411 411 def standin(filename):
412 412 """Return the repo-relative path to the standin for the specified big
413 413 file."""
414 414 # Notes:
415 415 # 1) Some callers want an absolute path, but for instance addlargefiles
416 416 # needs it repo-relative so it can be passed to repo[None].add(). So
417 417 # leave it up to the caller to use repo.wjoin() to get an absolute path.
418 418 # 2) Join with '/' because that's what dirstate always uses, even on
419 419 # Windows. Change existing separator to '/' first in case we are
420 420 # passed filenames from an external source (like the command line).
421 421 return shortnameslash + util.pconvert(filename)
422 422
423 423
424 424 def isstandin(filename):
425 425 """Return true if filename is a big file standin. filename must be
426 426 in Mercurial's internal form (slash-separated)."""
427 427 return filename.startswith(shortnameslash)
428 428
429 429
430 430 def splitstandin(filename):
431 431 # Split on / because that's what dirstate always uses, even on Windows.
432 432 # Change local separator to / first just in case we are passed filenames
433 433 # from an external source (like the command line).
434 434 bits = util.pconvert(filename).split(b'/', 1)
435 435 if len(bits) == 2 and bits[0] == shortname:
436 436 return bits[1]
437 437 else:
438 438 return None
439 439
440 440
441 441 def updatestandin(repo, lfile, standin):
442 442 """Re-calculate hash value of lfile and write it into standin
443 443
444 444 This assumes that "lfutil.standin(lfile) == standin", for efficiency.
445 445 """
446 446 file = repo.wjoin(lfile)
447 447 if repo.wvfs.exists(lfile):
448 448 hash = hashfile(file)
449 449 executable = getexecutable(file)
450 450 writestandin(repo, standin, hash, executable)
451 451 else:
452 452 raise error.Abort(_(b'%s: file not found!') % lfile)
453 453
454 454
455 455 def readasstandin(fctx):
456 456 """read hex hash from given filectx of standin file
457 457
458 458 This encapsulates how "standin" data is stored into storage layer."""
459 459 return fctx.data().strip()
460 460
461 461
462 462 def writestandin(repo, standin, hash, executable):
463 463 '''write hash to <repo.root>/<standin>'''
464 464 repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')
465 465
466 466
467 467 def copyandhash(instream, outfile):
468 468 """Read bytes from instream (iterable) and write them to outfile,
469 469 computing the SHA-1 hash of the data along the way. Return the hash."""
470 470 hasher = hashutil.sha1(b'')
471 471 for data in instream:
472 472 hasher.update(data)
473 473 outfile.write(data)
474 474 return hex(hasher.digest())
475 475
476 476
477 477 def hashfile(file):
478 478 if not os.path.exists(file):
479 479 return b''
480 480 with open(file, b'rb') as fd:
481 481 return hexsha1(fd)
482 482
483 483
484 484 def getexecutable(filename):
485 485 mode = os.stat(filename).st_mode
486 486 return (
487 487 (mode & stat.S_IXUSR)
488 488 and (mode & stat.S_IXGRP)
489 489 and (mode & stat.S_IXOTH)
490 490 )
491 491
492 492
493 493 def urljoin(first, second, *arg):
494 494 def join(left, right):
495 495 if not left.endswith(b'/'):
496 496 left += b'/'
497 497 if right.startswith(b'/'):
498 498 right = right[1:]
499 499 return left + right
500 500
501 501 url = join(first, second)
502 502 for a in arg:
503 503 url = join(url, a)
504 504 return url
505 505
506 506
507 507 def hexsha1(fileobj):
508 508 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
509 509 object data"""
510 510 h = hashutil.sha1()
511 511 for chunk in util.filechunkiter(fileobj):
512 512 h.update(chunk)
513 513 return hex(h.digest())
514 514
515 515
516 516 def httpsendfile(ui, filename):
517 517 return httpconnection.httpsendfile(ui, filename, b'rb')
518 518
519 519
520 520 def unixpath(path):
521 521 '''Return a version of path normalized for use with the lfdirstate.'''
522 522 return util.pconvert(os.path.normpath(path))
523 523
524 524
525 525 def islfilesrepo(repo):
526 526 '''Return true if the repo is a largefile repo.'''
527 527 if b'largefiles' in repo.requirements and any(
528 528 shortnameslash in f[1] for f in repo.store.datafiles()
529 529 ):
530 530 return True
531 531
532 532 return any(openlfdirstate(repo.ui, repo, False))
533 533
534 534
535 535 class storeprotonotcapable(Exception):
536 536 def __init__(self, storetypes):
537 537 self.storetypes = storetypes
538 538
539 539
540 540 def getstandinsstate(repo):
541 541 standins = []
542 542 matcher = getstandinmatcher(repo)
543 543 wctx = repo[None]
544 544 for standin in repo.dirstate.walk(
545 545 matcher, subrepos=[], unknown=False, ignored=False
546 546 ):
547 547 lfile = splitstandin(standin)
548 548 try:
549 549 hash = readasstandin(wctx[standin])
550 550 except IOError:
551 551 hash = None
552 552 standins.append((lfile, hash))
553 553 return standins
554 554
555 555
556 556 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
557 557 lfstandin = standin(lfile)
558 558 if lfstandin not in repo.dirstate:
559 559 lfdirstate.drop(lfile)
560 560 else:
561 561 stat = repo.dirstate._map[lfstandin]
562 562 state, mtime = stat.state, stat.mtime
563 563 if state == b'n':
564 564 if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
565 565 # state 'n' doesn't ensure 'clean' in this case
566 566 lfdirstate.update_file(
567 567 lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
568 568 )
569 569 else:
570 570 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
571 571 elif state == b'm':
572 lfdirstate.normallookup(lfile)
572 lfdirstate.update_file(
573 lfile, p1_tracked=True, wc_tracked=True, merged=True
574 )
573 575 elif state == b'r':
574 576 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
575 577 elif state == b'a':
576 578 lfdirstate.add(lfile)
577 579
578 580
579 581 def markcommitted(orig, ctx, node):
580 582 repo = ctx.repo()
581 583
582 584 lfdirstate = openlfdirstate(repo.ui, repo)
583 585 with lfdirstate.parentchange():
584 586 orig(node)
585 587
586 588 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
587 589 # because files coming from the 2nd parent are omitted in the latter.
588 590 #
589 591 # The former should be used to get targets of "synclfdirstate",
590 592 # because such files:
591 593 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
592 594 # - have to be marked as "n" after commit, but
593 595 # - aren't listed in "repo[node].files()"
594 596
595 597 for f in ctx.files():
596 598 lfile = splitstandin(f)
597 599 if lfile is not None:
598 600 synclfdirstate(repo, lfdirstate, lfile, False)
599 601 lfdirstate.write()
600 602
601 603 # As part of committing, copy all of the largefiles into the cache.
602 604 #
603 605 # Using "node" instead of "ctx" implies an additional "repo[node]"
604 606 # lookup in copyalltostore(), but lets us omit a redundant check for
605 607 # files coming from the 2nd parent, which should already exist in the
606 608 # store after a merge.
607 609 copyalltostore(repo, node)
608 610
609 611
610 612 def getlfilestoupdate(oldstandins, newstandins):
611 613 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
612 614 filelist = []
613 615 for f in changedstandins:
614 616 if f[0] not in filelist:
615 617 filelist.append(f[0])
616 618 return filelist
617 619
618 620
619 621 def getlfilestoupload(repo, missing, addfunc):
620 622 makeprogress = repo.ui.makeprogress
621 623 with makeprogress(
622 624 _(b'finding outgoing largefiles'),
623 625 unit=_(b'revisions'),
624 626 total=len(missing),
625 627 ) as progress:
626 628 for i, n in enumerate(missing):
627 629 progress.update(i)
628 630 parents = [p for p in repo[n].parents() if p != repo.nullid]
629 631
630 632 with lfstatus(repo, value=False):
631 633 ctx = repo[n]
632 634
633 635 files = set(ctx.files())
634 636 if len(parents) == 2:
635 637 mc = ctx.manifest()
636 638 mp1 = ctx.p1().manifest()
637 639 mp2 = ctx.p2().manifest()
638 640 for f in mp1:
639 641 if f not in mc:
640 642 files.add(f)
641 643 for f in mp2:
642 644 if f not in mc:
643 645 files.add(f)
644 646 for f in mc:
645 647 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
646 648 files.add(f)
647 649 for fn in files:
648 650 if isstandin(fn) and fn in ctx:
649 651 addfunc(fn, readasstandin(ctx[fn]))
650 652
651 653
652 654 def updatestandinsbymatch(repo, match):
653 655 """Update standins in the working directory according to specified match
654 656
655 657 This returns (possibly modified) ``match`` object to be used for
656 658 subsequent commit process.
657 659 """
658 660
659 661 ui = repo.ui
660 662
661 663 # Case 1: user calls commit with no specific files or
662 664 # include/exclude patterns: refresh and commit all files that
663 665 # are "dirty".
664 666 if match is None or match.always():
665 667 # Spend a bit of time here to get a list of files we know
666 668 # are modified so we can compare only against those.
667 669 # It can cost a lot of time (several seconds)
668 670 # otherwise to update all standins if the largefiles are
669 671 # large.
670 672 lfdirstate = openlfdirstate(ui, repo)
671 673 dirtymatch = matchmod.always()
672 674 unsure, s = lfdirstate.status(
673 675 dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
674 676 )
675 677 modifiedfiles = unsure + s.modified + s.added + s.removed
676 678 lfiles = listlfiles(repo)
677 679 # this only loops through largefiles that exist (not
678 680 # removed/renamed)
679 681 for lfile in lfiles:
680 682 if lfile in modifiedfiles:
681 683 fstandin = standin(lfile)
682 684 if repo.wvfs.exists(fstandin):
683 685 # this handles the case where a rebase is being
684 686 # performed and the working copy is not updated
685 687 # yet.
686 688 if repo.wvfs.exists(lfile):
687 689 updatestandin(repo, lfile, fstandin)
688 690
689 691 return match
690 692
691 693 lfiles = listlfiles(repo)
692 694 match._files = repo._subdirlfs(match.files(), lfiles)
693 695
694 696 # Case 2: user calls commit with specified patterns: refresh
695 697 # any matching big files.
696 698 smatcher = composestandinmatcher(repo, match)
697 699 standins = repo.dirstate.walk(
698 700 smatcher, subrepos=[], unknown=False, ignored=False
699 701 )
700 702
701 703 # No matching big files: get out of the way and pass control to
702 704 # the usual commit() method.
703 705 if not standins:
704 706 return match
705 707
706 708 # Refresh all matching big files. It's possible that the
707 709 # commit will end up failing, in which case the big files will
708 710 # stay refreshed. No harm done: the user modified them and
709 711 # asked to commit them, so sooner or later we're going to
710 712 # refresh the standins. Might as well leave them refreshed.
711 713 lfdirstate = openlfdirstate(ui, repo)
712 714 for fstandin in standins:
713 715 lfile = splitstandin(fstandin)
714 716 if lfdirstate[lfile] != b'r':
715 717 updatestandin(repo, lfile, fstandin)
716 718
717 719 # Cook up a new matcher that only matches regular files or
718 720 # standins corresponding to the big files requested by the
719 721 # user. Have to modify _files to prevent commit() from
720 722 # complaining "not tracked" for big files.
721 723 match = copy.copy(match)
722 724 origmatchfn = match.matchfn
723 725
724 726 # Check both the list of largefiles and the list of
725 727 # standins because if a largefile was removed, it
726 728 # won't be in the list of largefiles at this point
727 729 match._files += sorted(standins)
728 730
729 731 actualfiles = []
730 732 for f in match._files:
731 733 fstandin = standin(f)
732 734
733 735 # For largefiles, only one of the normal and standin should be
734 736 # committed (except if one of them is a remove). In the case of a
735 737 # standin removal, drop the normal file if it is unknown to dirstate.
736 738 # Thus, skip plain largefile names but keep the standin.
737 739 if f in lfiles or fstandin in standins:
738 740 if repo.dirstate[fstandin] != b'r':
739 741 if repo.dirstate[f] != b'r':
740 742 continue
741 743 elif repo.dirstate[f] == b'?':
742 744 continue
743 745
744 746 actualfiles.append(f)
745 747 match._files = actualfiles
746 748
747 749 def matchfn(f):
748 750 if origmatchfn(f):
749 751 return f not in lfiles
750 752 else:
751 753 return f in standins
752 754
753 755 match.matchfn = matchfn
754 756
755 757 return match
756 758
757 759
758 760 class automatedcommithook(object):
759 761 """Stateful hook to update standins at the 1st commit of resuming
760 762
761 763 For efficiency, updating standins in the working directory should
762 764 be avoided while automated committing (like rebase, transplant and
763 765 so on), because they should be updated before committing.
764 766
765 767 But the 1st commit of resuming automated committing (e.g. ``rebase
766 768 --continue``) should update them, because largefiles may be
767 769 modified manually.
768 770 """
769 771
770 772 def __init__(self, resuming):
771 773 self.resuming = resuming
772 774
773 775 def __call__(self, repo, match):
774 776 if self.resuming:
775 777 self.resuming = False # avoids updating at subsequent commits
776 778 return updatestandinsbymatch(repo, match)
777 779 else:
778 780 return match
779 781
780 782
781 783 def getstatuswriter(ui, repo, forcibly=None):
782 784 """Return the function to write largefiles specific status out
783 785
784 786 If ``forcibly`` is ``None``, this returns the last element of
785 787 ``repo._lfstatuswriters`` as "default" writer function.
786 788
787 789 Otherwise, this returns the function to always write out (or
788 790 ignore if ``not forcibly``) status.
789 791 """
790 792 if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
791 793 return repo._lfstatuswriters[-1]
792 794 else:
793 795 if forcibly:
794 796 return ui.status # forcibly WRITE OUT
795 797 else:
796 798 return lambda *msg, **opts: None # forcibly IGNORE
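
As a side note, a hedged usage sketch for the `lfstatus` context manager defined at the top of this module; `repo` here stands for any largefiles-enabled repository object (an assumption for illustration, not part of this diff):

    # temporarily enable largefiles-aware status reporting;
    # the previous repo.lfstatus value is restored on exit
    with lfstatus(repo):
        changes = repo.status()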