##// END OF EJS Templates
vfs: use 'vfs' module directly in 'hgext.largefile'...
Pierre-Yves David -
r31247:04b42862 default
parent child Browse files
Show More
@@ -1,666 +1,667 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import hashlib
14 14 import os
15 15 import platform
16 16 import stat
17 17
18 18 from mercurial.i18n import _
19 19
20 20 from mercurial import (
21 21 dirstate,
22 22 encoding,
23 23 error,
24 24 httpconnection,
25 25 match as matchmod,
26 26 node,
27 27 pycompat,
28 28 scmutil,
29 29 util,
30 vfs as vfsmod,
30 31 )
31 32
# Directory (relative to the repo root) holding the largefile standins.
shortname = '.hglf'
# Standin prefix with a trailing slash, for prefix tests on slash-separated
# (dirstate-internal) paths.
shortnameslash = shortname + '/'
# Name shared by the config section and the store/user-cache directories.
longname = 'largefiles'
35 36
36 37 # -- Private worker functions ------------------------------------------
37 38
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size above which files are treated as largefiles.

    ``opt`` (the command-line value) takes precedence; the
    ``largefiles.minsize`` configuration (falling back to ``default``) is
    only consulted when ``opt`` is unset and ``assumelfiles`` is true.
    Raises error.Abort when the value is not a number, or when no value
    could be determined at all.
    '''
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            # config values arrive as strings; normalize to float
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % lfsize)
    if lfsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
51 52
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy.

    Parent directories of ``dest`` are created as needed.  The copy
    fallback is atomic and preserves ``src``'s permission bits.
    """
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, 'rb') as srcf:
            with util.atomictempfile(dest) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)
64 65
def usercachepath(ui, hash):
    '''Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space.

    The path is only computed; nothing is created on disk.
    '''
    return os.path.join(_usercachedir(ui), hash)
71 72
def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.

    An explicit ``largefiles.usercache`` path wins; otherwise a
    platform-appropriate per-user cache directory is derived from the
    environment.  Raises error.Abort when no location can be determined.
    '''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return path
    if pycompat.osname == 'nt':
        # prefer %LOCALAPPDATA%, fall back to %APPDATA%
        appdata = encoding.environ.get('LOCALAPPDATA',\
                encoding.environ.get('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif pycompat.osname == 'posix':
        # honour $XDG_CACHE_HOME before falling back to ~/.cache
        path = encoding.environ.get('XDG_CACHE_HOME')
        if path:
            return os.path.join(path, longname)
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n')
                          % pycompat.osname)
    # known OS, but no usable environment variable was set
    raise error.Abort(_('unknown %s usercache location') % longname)
97 98
def inusercache(ui, hash):
    '''Report whether the user cache already holds a file for ``hash``.'''
    return os.path.exists(usercachepath(ui, hash))
101 102
def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        # populate the local store from the user cache (hardlink or copy)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
116 117
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass for the largefiles dirstate.

    Every path is normalized with unixpath() before delegating to the
    base class, so entries are always stored in the slash-separated form
    regardless of the caller's platform.
    '''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
139 140
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    When ``create`` is true and no dirstate file exists yet, it is
    populated from the standins currently tracked by the repo dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
165 166
def lfdirstatestatus(lfdirstate, repo):
    '''Resolve "unsure" entries of the lfdirstate status against '.'.

    Each unsure largefile is hashed and compared with its standin in the
    working parent; clean ones are marked normal in ``lfdirstate`` as a
    side effect.  Returns the (adjusted) scmutil.status tuple.
    '''
    wctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        # no standin, or hash mismatch => the largefile is modified
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
182 183
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    lfiles = []
    for f in repo[rev].walk(matcher):
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
194 195
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    path = storepath(repo, hash, forcelocal)
    return os.path.exists(path)
198 199
def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.

    For shared repositories the store lives in the share source unless
    ``forcelocal`` is set.
    '''
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)
205 206
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)
222 223
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository.  Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).

    The copied data is re-hashed on the fly; on a hash mismatch the
    partially written file is removed and False is returned.
    '''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd:
        with wvfs(filename, 'wb') as destfd:
            gothash = copyandhash(
                util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True
246 247
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing behind ``file`` at ``rev`` into the store.

    A no-op when the hash is already in the store; warns when the
    largefile is absent from the working directory.
    '''
    wvfs = repo.wvfs
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))
257 258
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins that still exist in this revision's manifest
        if not isstandin(filename):
            continue
        if filename not in ctx.manifest():
            continue
        copytostore(repo, ctx.node(), splitstandin(filename))
266 267
def copytostoreabsolute(repo, file, hash):
    '''Copy the file at absolute path ``file`` into the store under ``hash``,
    then link/copy it into the user cache.

    Prefers linking from the user cache when the hash is already there;
    otherwise copies atomically with the store's create mode.
    '''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, 'rb') as srcf:
            with util.atomictempfile(storepath(repo, hash),
                                     createmode=repo.store.createmode) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
278 279
def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)
284 285
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # rebase rmatcher's file patterns under the standin directory
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match
304 305
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher.  Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    # NOTE: this local deliberately shadows the module-level isstandin();
    # here it is the standin matcher's original matchfn.
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        # f must be a standin AND its largefile must match rmatcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
316 317
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
328 329
def isstandin(filename):
    '''Return true if filename is a big file standin.  filename must be
    in Mercurial's internal form (slash-separated).'''
    # a simple prefix test; shortnameslash keeps '.hglf' itself from matching
    return filename.startswith(shortnameslash)
333 334
def splitstandin(filename):
    '''Return the largefile path for a standin path, or None if ``filename``
    is not a standin.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) != 2 or parts[0] != shortname:
        return None
    return parts[1]
343 344
def updatestandin(repo, standin):
    '''Re-read the largefile behind ``standin`` from the working directory
    and rewrite the standin with its current hash and executable bit.

    Raises error.Abort when the largefile itself is missing from the
    working directory.

    NB: the ``standin`` parameter shadows the module-level standin().
    '''
    # hoist the result instead of calling splitstandin() three times
    lfile = splitstandin(standin)
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % lfile)
352 353
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given

    Propagates the lookup error of the underlying filectx when the
    standin does not exist at that node.
    '''
    return repo[node][standin(filename)].data().strip()
357 358
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>

    The standin content is the hex hash plus a trailing newline; the
    executable flag is applied to the standin file itself.
    '''
    repo.wwrite(standin, hash + '\n', executable and 'x' or '')
361 362
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way.  Return the hash.'''
    # hashlib.sha1('') raises TypeError on Python 3 (str seed); the
    # no-argument form behaves identically on Python 2 and is portable.
    hasher = hashlib.sha1()
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()
370 371
def hashrepofile(repo, file):
    '''Return the hash of the repo-relative ``file`` in the working copy.'''
    return hashfile(repo.wjoin(file))
373 374
def hashfile(file):
    '''Return the hex SHA-1 hash of the contents of ``file``, or the empty
    string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    # hashlib.sha1('') raises TypeError on Python 3 (str seed); the
    # no-argument form behaves identically on Python 2 and is portable.
    hasher = hashlib.sha1()
    with open(file, 'rb') as fd:
        for data in util.filechunkiter(fd):
            hasher.update(data)
    return hasher.hexdigest()
382 383
def getexecutable(filename):
    '''Return truthy iff user, group and other all have the executable
    bit set on ``filename``.'''
    st = os.stat(filename)
    return ((st.st_mode & stat.S_IXUSR) and
            (st.st_mode & stat.S_IXGRP) and
            (st.st_mode & stat.S_IXOTH))
388 389
def urljoin(first, second, *arg):
    """Join URL fragments with single '/' separators.

    A '/' is appended to the left part only when missing, and exactly one
    leading '/' is stripped from each right part.
    """
    def join(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    url = join(first, second)
    for piece in arg:
        url = join(url, piece)
    return url
401 402
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
409 410
def httpsendfile(ui, filename):
    '''Wrap ``filename`` in an httpconnection.httpsendfile opened 'rb'.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
412 413
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normpath collapses '..'/'.' segments; pconvert forces '/' separators
    return util.pconvert(os.path.normpath(path))
416 417
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.

    Either the 'largefiles' requirement is present together with at least
    one standin in the store, or the largefiles dirstate is non-empty.
    '''
    if ('largefiles' in repo.requirements and
        any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    return any(openlfdirstate(repo.ui, repo, False))
424 425
class storeprotonotcapable(Exception):
    '''Exception carrying the store types that could not be satisfied.'''
    def __init__(self, storetypes):
        # the store types that were requested but unavailable
        self.storetypes = storetypes
428 429
def getstandinsstate(repo):
    '''Return [(lfile, hash)] for every standin tracked by the dirstate;
    hash is None when the standin cannot be read.'''
    matcher = getstandinmatcher(repo)
    standins = []
    for fstandin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(fstandin)
        try:
            standins.append((lfile, readstandin(repo, lfile)))
        except IOError:
            standins.append((lfile, None))
    return standins
440 441
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Sync the lfdirstate entry for ``lfile`` with the repo dirstate entry
    of its standin.

    When ``normallookup`` is true, 'n' entries are marked for
    re-examination instead of clean.
    '''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        # dirstate tuple: [0] = state char, [3] = mtime
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
463 464
def markcommitted(orig, ctx, node):
    '''Post-commit wrapper: sync lfdirstate for committed standins and copy
    the committed largefiles into the store/user cache.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            lfile = splitstandin(f)
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)
487 488
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the deduplicated file names whose (name, hash) pair appears
    in exactly one of the two standin lists.'''
    changed = set(oldstandins) ^ set(newstandins)
    seen = set()
    filelist = []
    for fname, _hash in changed:
        if fname not in seen:
            seen.add(fname)
            filelist.append(fname)
    return filelist
495 496
def getlfilestoupload(repo, missing, addfunc):
    '''For every standin touched by the ``missing`` revisions, invoke
    ``addfunc(standinname, hash)``.

    Merge commits additionally contribute every file whose manifest entry
    differs from either parent, so largefiles brought in by a merge are
    not skipped.
    '''
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revisions'), total=len(missing))
        parents = [p for p in repo[n].parents() if p != node.nullid]

        # temporarily disable largefiles status tracking while reading ctx
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            # files removed relative to either parent...
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            # ...and files whose content differs from either parent
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
527 528
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.

    Two cases are handled: an always-matcher refreshes every dirty
    largefile's standin; a pattern matcher refreshes only matching
    standins and rewrites the matcher so commit() sees standins instead
    of plain largefile names.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                if repo.wvfs.exists(standin(lfile)):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo,
                                      standin(lfile))

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove).  In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        # regular files match as before (minus largefile names);
        # standins of requested big files also match
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
629 630
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        # whether the next __call__ is the first commit after resuming
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match
650 651
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status # forcibly WRITE OUT
    return lambda *msg, **opts: None # forcibly IGNORE
General Comments 0
You need to be logged in to leave comments. Login now