##// END OF EJS Templates
largefiles: fix misleading comments in lfutil instore and storepath...
liscju -
r29419:01c0324a default
parent child Browse files
Show More
@@ -1,665 +1,664 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import hashlib
14 14 import os
15 15 import platform
16 16 import stat
17 17
18 18 from mercurial.i18n import _
19 19
20 20 from mercurial import (
21 21 dirstate,
22 22 error,
23 23 httpconnection,
24 24 match as matchmod,
25 25 node,
26 26 scmutil,
27 27 util,
28 28 )
29 29
30 30 shortname = '.hglf'
31 31 shortnameslash = shortname + '/'
32 32 longname = 'largefiles'
33 33
34 34
35 35 # -- Private worker functions ------------------------------------------
36 36
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size (presumably in MB - see config docs) above
    which files are treated as largefiles.

    ``opt`` wins when given; otherwise, when ``assumelfiles`` is set, fall
    back to the ``largefiles.minsize`` config value. Aborts when the value
    is non-numeric or no size can be determined at all.'''
    size = opt
    if assumelfiles and not size:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % size)
    if size is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return size
50 50
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy.

    The destination directory is created if necessary. On copy, the
    destination gets the source's permission bits."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy.
        # Close the source explicitly: the original relied on refcounting,
        # which leaks the descriptor on error or on non-CPython runtimes.
        with open(src, 'rb') as srcf:
            dst = util.atomictempfile(dest)
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
            dst.close()
        os.chmod(dest, os.stat(src).st_mode)
63 63
def usercachepath(ui, hash):
    '''Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space.'''
    cachedir = _usercachedir(ui)
    return os.path.join(cachedir, hash)
70 70
def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.

    An explicit ``largefiles.usercache`` config path always wins; otherwise
    a platform-conventional per-user cache directory is derived. Aborts when
    no location can be determined.'''
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return configured
    if os.name == 'nt':
        # prefer the local app-data directory, falling back to roaming
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif os.name == 'posix':
        # honour XDG first, then the conventional ~/.cache
        xdgcache = os.getenv('XDG_CACHE_HOME')
        if xdgcache:
            return os.path.join(xdgcache, longname)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n') % os.name)
    raise error.Abort(_('unknown %s usercache location\n') % longname)
94 94
def inusercache(ui, hash):
    '''Report whether the largefile with the given hash exists in the
    per-user cache.'''
    return os.path.exists(usercachepath(ui, hash))
98 98
def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        # populate the store from the user cache before handing out the path
        storedpath = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), storedpath)
        return storedpath
    return None
113 113
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass that normalizes every path argument to the
    '/'-separated form (via unixpath) before delegating to the base class.'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def _ignore(self, f):
        # nothing tracked by the largefiles dirstate is ever ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
136 136
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = scmutil.opener(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if standins:
            vfs.makedirs(lfstoredir)

        for standinfile in standins:
            lfdirstate.normallookup(splitstandin(standinfile))
    return lfdirstate
162 162
def lfdirstatestatus(lfdirstate, repo):
    '''Resolve "unsure" lfdirstate entries against the working parent's
    standin hashes, marking files clean or modified, and return the
    resulting status object.'''
    wctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        # compare the committed standin hash with the working-copy content
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
179 179
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
191 191
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    path = storepath(repo, hash, forcelocal)
    return os.path.exists(path)
196 195
def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.'''
    if repo.shared() and not forcelocal:
        # shared repos keep their primary store in the share source
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)
203 202
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    primarypath = storepath(repo, hash, False)
    if instore(repo, hash):
        return (primarypath, True)
    if repo.shared() and instore(repo, hash, True):
        return (storepath(repo, hash, True), True)
    return (primarypath, False)
220 219
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd, wvfs(filename, 'wb') as destfd:
        gothash = copyandhash(srcfd, destfd)
    if gothash != hash:
        # data corruption: remove the bad copy and report failure
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True
243 242
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing in for ``file`` at ``rev`` into the local
    store, unless it is already present.

    ``uploaded`` is unused here; kept for interface compatibility.'''
    wvfs = repo.wvfs
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    if not wvfs.exists(file):
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))
        return
    copytostoreabsolute(repo, wvfs.join(file), hash)
254 253
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        # only standins that are actually in the revision's manifest count
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx.node(), realfile)
263 262
264 263
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at absolute path ``file`` into the local store
    under ``hash``, hardlinking from the user cache when possible; on a
    real copy, the result is linked back into the user cache.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        # close the source explicitly: the original relied on refcounting,
        # which leaks the descriptor on error or on non-CPython runtimes
        with open(file, 'rb') as srcf:
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
276 275
def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    cachepath = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), cachepath)
282 281
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    def badfn(f, msg):
        return None

    if rmatcher and not rmatcher.always():
        # rebase the user's patterns under the standin directory
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match
302 301
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    standinmatchfn = smatcher.matchfn

    def composedmatchfn(f):
        # f must be a standin AND its largefile must satisfy rmatcher
        return standinmatchfn(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn
    return smatcher
314 313
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.

    Notes:
    1) Some callers want an absolute path, but for instance addlargefiles
       needs it repo-relative so it can be passed to repo[None].add(). So
       leave it up to the caller to use repo.wjoin() to get an absolute path.
    2) Join with '/' because that's what dirstate always uses, even on
       Windows. Change existing separator to '/' first in case we are
       passed filenames from an external source (like the command line).
    '''
    return shortnameslash + util.pconvert(filename)
326 325
def isstandin(filename):
    '''Report whether filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)
331 330
def splitstandin(filename):
    '''Return the largefile path for the given standin path, or None when
    filename is not a standin.

    Split on '/' because that's what dirstate always uses, even on Windows;
    convert the local separator first in case we are passed filenames from
    an external source (like the command line).'''
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) != 2 or bits[0] != shortname:
        return None
    return bits[1]
341 340
def updatestandin(repo, standin):
    '''Re-hash the working-copy largefile behind ``standin`` and rewrite the
    standin accordingly; abort when the largefile is missing.'''
    lfile = splitstandin(standin)
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % lfile)
350 349
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
355 354
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + '\n', 'x' if executable else '')
359 358
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    # sha1() instead of sha1(''): identical initial state, but the empty-str
    # seed breaks under Python 3 (sha1 requires bytes there)
    hasher = hashlib.sha1()
    for chunk in instream:
        outfile.write(chunk)
        hasher.update(chunk)
    return hasher.hexdigest()
368 367
def hashrepofile(repo, file):
    '''Return the SHA-1 hash of the repo-relative working-copy file.'''
    return hashfile(repo.wjoin(file))
371 370
def hashfile(file):
    '''Return the hex SHA-1 digest of the named file, or '' if it does not
    exist.'''
    if not os.path.exists(file):
        return ''
    # sha1() instead of sha1(''): identical initial state, but the empty-str
    # seed breaks under Python 3 (sha1 requires bytes there)
    hasher = hashlib.sha1()
    # 'with' closes the descriptor even when filechunkiter raises,
    # unlike the previous explicit close()
    with open(file, 'rb') as fd:
        for data in util.filechunkiter(fd, 128 * 1024):
            hasher.update(data)
    return hasher.hexdigest()
381 380
def getexecutable(filename):
    '''Truthy only when the user, group, and other execute bits are all set
    on filename.'''
    mode = os.stat(filename).st_mode
    return (mode & stat.S_IXUSR) and (mode & stat.S_IXGRP) and (mode & stat.S_IXOTH)
387 386
def urljoin(first, second, *arg):
    '''Join two or more URL components, ensuring exactly one slash is
    inserted at each boundary (one leading slash of the right-hand piece
    is dropped).'''
    def _join(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    url = _join(first, second)
    for piece in arg:
        url = _join(url, piece)
    return url
400 399
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
408 407
def httpsendfile(ui, filename):
    '''Return an httpsendfile wrapper for filename, opened for binary
    reading.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
411 410
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
415 414
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if 'largefiles' in repo.requirements:
        # the requirement alone is not enough: some standin must exist
        if any(shortnameslash in f[0] for f in repo.store.datafiles()):
            return True
    # fall back to checking for a non-empty largefiles dirstate
    return any(openlfdirstate(repo.ui, repo, False))
423 422
class storeprotonotcapable(Exception):
    '''Exception carrying the store types that could not be satisfied.'''
    def __init__(self, storetypes):
        # storetypes: the store types the caller asked for
        self.storetypes = storetypes
427 426
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin tracked in the
    dirstate; hash is None when the standin cannot be read.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standinfile in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standinfile)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins
439 438
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Mirror the dirstate entry of lfile's standin into lfdirstate.'''
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        state, mtime = '?', -1
    else:
        entry = repo.dirstate._map[lfstandin]
        state, mtime = entry[0], entry[3]
    if state == 'n':
        if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
462 461
def markcommitted(orig, ctx, node):
    '''Wrapper around a commit's markcommitted: after delegating to ``orig``,
    sync the largefiles dirstate for committed standins and copy the new
    largefiles into the store.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)
486 485
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the file names whose (file, hash) entry differs between the
    two standin lists, without duplicates.'''
    changed = set(oldstandins) ^ set(newstandins)
    filelist = []
    for entry in changed:
        if entry[0] not in filelist:
            filelist.append(entry[0])
    return filelist
494 493
def getlfilestoupload(repo, missing, addfunc):
    '''For every standin touched by the given missing revisions, call
    addfunc(standin, hash). Shows progress over the revisions.'''
    total = len(missing)
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revisions'), total=total)
        parents = [p for p in repo[n].parents() if p != node.nullid]

        # look up the changectx with largefiles status disabled
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            # for merges, also consider files removed relative to either
            # parent and files whose content differs from either parent
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for parentmf in (mp1, mp2):
                for f in parentmf:
                    if f not in mc:
                        files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
526 525
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                if repo.wvfs.exists(standin(lfile)):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo,
                                      standin(lfile))

        return match

    lfiles = listlfiles(repo)
    # NOTE(review): _subdirlfs presumably expands directory patterns to the
    # largefiles they contain - confirm against repo._subdirlfs
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        # skip standins whose largefile is marked removed ('r')
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    # (copy.copy: keep the caller's matcher unmodified)
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove).  In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        # accept regular files matched by the original matcher (but never
        # plain largefile names), plus the standins collected above
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
628 627
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        # only the very first commit after resuming refreshes standins
        self.resuming = False
        return updatestandinsbymatch(repo, match)
649 648
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE
General Comments 0
You need to be logged in to leave comments. Login now