##// END OF EJS Templates
largefiles: don't access repo.changelog directly in getlfilestoupload...
Mads Kiilerich -
r28877:8079639b default
parent child Browse files
Show More
@@ -1,655 +1,655 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import platform
13 13 import stat
14 14 import copy
15 15
16 16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 17 from mercurial.i18n import _
18 18 from mercurial import node, error
19 19
20 20 shortname = '.hglf'
21 21 shortnameslash = shortname + '/'
22 22 longname = 'largefiles'
23 23
24 24
25 25 # -- Private worker functions ------------------------------------------
26 26
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size (in MB) above which files are largefiles.

    ``opt`` (a command line option) wins; otherwise, when ``assumelfiles``
    is set, fall back to the ``largefiles.minsize`` config value.
    Raises error.Abort when the value is not a number or nothing usable
    was specified at all.
    '''
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            # no trailing newline: Abort's printer supplies its own
            raise error.Abort(_('largefiles: size must be number (not %s)')
                              % lfsize)
    if lfsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
40 40
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fall back on an atomic copy; close the source
        # explicitly instead of leaking the descriptor until GC
        with open(src, 'rb') as srcf:
            dst = util.atomictempfile(dest)
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
            dst.close()
        os.chmod(dest, os.stat(src).st_mode)
53 53
def usercachepath(ui, hash):
    '''Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space.'''
    cachedir = _usercachedir(ui)
    return os.path.join(cachedir, hash)
60 60
def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.

    The ``largefiles.usercache`` config wins; otherwise the path is derived
    from platform conventions (LOCALAPPDATA/APPDATA on Windows,
    ~/Library/Caches on Mac OS X, XDG_CACHE_HOME or ~/.cache on POSIX).
    Raises error.Abort when no location can be determined.
    '''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return path
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif os.name == 'posix':
        path = os.getenv('XDG_CACHE_HOME')
        if path:
            return os.path.join(path, longname)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        # no trailing newline in Abort messages: the printer adds one
        raise error.Abort(_('unknown operating system: %s') % os.name)
    raise error.Abort(_('unknown %s usercache location') % longname)
84 84
def inusercache(ui, hash):
    '''Return true if the "global" user cache holds a file with this hash.'''
    return os.path.exists(usercachepath(ui, hash))
88 88
def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    storedpath, present = findstorepath(repo, hash)
    if present:
        repo.ui.note(_('found %s in store\n') % hash)
        return storedpath
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        target = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), target)
        return target
    return None
103 103
class largefilesdirstate(dirstate.dirstate):
    '''A dirstate whose entry points normalize paths via unixpath() before
    delegating to the base class, and which never ignores anything.'''
    def __getitem__(self, key):
        sup = super(largefilesdirstate, self)
        return sup.__getitem__(unixpath(key))
    def normal(self, f):
        sup = super(largefilesdirstate, self)
        return sup.normal(unixpath(f))
    def remove(self, f):
        sup = super(largefilesdirstate, self)
        return sup.remove(unixpath(f))
    def add(self, f):
        sup = super(largefilesdirstate, self)
        return sup.add(unixpath(f))
    def drop(self, f):
        sup = super(largefilesdirstate, self)
        return sup.drop(unixpath(f))
    def forget(self, f):
        sup = super(largefilesdirstate, self)
        return sup.forget(unixpath(f))
    def normallookup(self, f):
        sup = super(largefilesdirstate, self)
        return sup.normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
126 126
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    storedir = longname
    opener = scmutil.opener(vfs.join(storedir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # Populate and create the largefiles dirstate if it does not exist yet,
    # so the first meaningful largefiles operation in a new clone sets it up.
    if create and not vfs.exists(vfs.join(storedir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if standins:
            vfs.makedirs(storedir)

        for standinfile in standins:
            lfdirstate.normallookup(splitstandin(standinfile))
    return lfdirstate
152 152
def lfdirstatestatus(lfdirstate, repo):
    '''Resolve "unsure" lfdirstate entries against the working context and
    return the (updated) status object.'''
    wctx = repo['.']
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        standinctx = None
        try:
            standinctx = wctx[standin(lfile)]
        except LookupError:
            pass
        if (standinctx and
                standinctx.data().strip() == hashfile(repo.wjoin(lfile))):
            clean.append(lfile)
            lfdirstate.normal(lfile)
        else:
            modified.append(lfile)
    return s
169 169
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # in the working directory, skip files dirstate knows nothing about
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
181 181
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store
    (the repo-local largefiles store, or the share source's store unless
    forcelocal is set).  The old docstring said "user cache", which is a
    different location -- see inusercache().'''
    return os.path.exists(storepath(repo, hash, forcelocal))
186 186
def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles cache for a
    file with the given hash.'''
    # a shared repo keeps its largefile store at the share source
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)
193 193
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        # must return the documented (path, exists) pair; returning the bare
        # path here broke callers that unpack the tuple
        return (storepath(repo, hash, True), True)

    return (path, False)
210 210
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    cached = findfile(repo, hash)
    if cached is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # Working-copy writes are not atomic; the hash check below catches a
    # write that failed partway through.
    with open(cached, 'rb') as srcfd:
        with wvfs(filename, 'wb') as destfd:
            gothash = copyandhash(srcfd, destfd)
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, cached, gothash))
        wvfs.unlink(filename)
        return False
    return True
233 233
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing in for ``file`` at ``rev`` into the
    store, unless it is already there.'''
    wvfs = repo.wvfs
    expectedhash = readstandin(repo, file, rev)
    if instore(repo, expectedhash):
        return
    if not wvfs.exists(file):
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, expectedhash))
        return
    copytostoreabsolute(repo, wvfs.join(file), expectedhash)
244 244
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins that survived into this revision's manifest
        if isstandin(filename) and filename in ctx.manifest():
            copytostore(repo, ctx.node(), splitstandin(filename))
253 253
254 254
def copytostoreabsolute(repo, file, hash):
    '''Install the largefile at absolute path ``file`` into the store under
    ``hash``: link it from the user cache when present, otherwise copy it
    atomically and then link the store copy back into the user cache.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        # close the source explicitly instead of leaking it until GC
        with open(file, 'rb') as srcf:
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
266 266
def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    target = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), target)
272 272
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # suppress warnings about missing files or directories
    badfn = lambda f, msg: None

    if not rmatcher or rmatcher.always():
        # no patterns: match the whole standin directory under the root
        return scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)

    pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
    if not pats:
        pats = [wvfs.join(standindir)]
    match = scmutil.match(repo[None], pats, badfn=badfn)
    # if pats is empty, it would incorrectly always match, so clear _always
    match._always = False
    return match
292 292
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    standinmatchfn = smatcher.matchfn
    def composedmatchfn(f):
        # f must be a standin AND its largefile must satisfy rmatcher
        return standinmatchfn(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn
    return smatcher
304 304
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.

    Notes:
    1) Some callers want an absolute path, but for instance addlargefiles
       needs it repo-relative so it can be passed to repo[None].add().  So
       leave it up to the caller to use repo.wjoin() to get an absolute path.
    2) Join with '/' because that's what dirstate always uses, even on
       Windows.  Change existing separator to '/' first in case we are
       passed filenames from an external source (like the command line).
    '''
    return shortnameslash + util.pconvert(filename)
316 316
def isstandin(filename):
    '''Return true if filename is a big file standin.  filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)
321 321
def splitstandin(filename):
    '''Return the largefile name for the given standin path, or None when
    filename is not a standin.  Split on '/' because that's what dirstate
    always uses, even on Windows; normalize separators first in case the
    name came from an external source (like the command line).'''
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
331 331
def updatestandin(repo, standin):
    '''Re-hash the working copy of standin's largefile and rewrite the
    standin.  Raises error.Abort if the largefile is missing.'''
    # compute the largefile name once instead of three times
    lfile = splitstandin(standin)
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % lfile)
340 340
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
345 345
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
349 349
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hashstate = util.sha1('')
    for chunk in instream:
        hashstate.update(chunk)
        outfile.write(chunk)
    return hashstate.hexdigest()
358 358
def hashrepofile(repo, file):
    '''Return the hash of the working-directory copy of file in repo.'''
    return hashfile(repo.wjoin(file))
361 361
def hashfile(file):
    '''Return the hex SHA-1 hash of file's contents, or '' when the file
    does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    # with-block guarantees the descriptor is closed even if reading raises
    with open(file, 'rb') as fd:
        for data in util.filechunkiter(fd, 128 * 1024):
            hasher.update(data)
    return hasher.hexdigest()
371 371
def getexecutable(filename):
    '''Return a truthy value iff filename is executable by user, group
    AND other.'''
    st_mode = os.stat(filename).st_mode
    return ((st_mode & stat.S_IXUSR) and
            (st_mode & stat.S_IXGRP) and
            (st_mode & stat.S_IXOTH))
377 377
def urljoin(first, second, *arg):
    '''Join URL components, ensuring exactly one '/' at each seam.'''
    def _seam(base, tail):
        if not base.endswith('/'):
            base = base + '/'
        if tail.startswith('/'):
            tail = tail[1:]
        return base + tail

    result = _seam(first, second)
    for piece in arg:
        result = _seam(result, piece)
    return result
390 390
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hashstate = util.sha1()
    for chunk in util.filechunkiter(data):
        hashstate.update(chunk)
    return hashstate.hexdigest()
398 398
def httpsendfile(ui, filename):
    '''Wrap filename in an httpsendfile object opened for binary reading.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
401 401
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
405 405
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if 'largefiles' in repo.requirements:
        # requirement present: confirm a standin actually exists in the store
        if any(shortnameslash in f[0] for f in repo.store.datafiles()):
            return True
    # otherwise, a non-empty largefiles dirstate also qualifies
    return any(openlfdirstate(repo.ui, repo, False))
413 413
class storeprotonotcapable(Exception):
    '''Raised when none of the store types in ``storetypes`` is supported
    by the wire protocol in use.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
417 417
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every tracked standin;
    hash is None when the standin cannot be read.'''
    standins = []
    matcher = getstandinmatcher(repo)
    # note: don't shadow the module-level standin() helper here
    for standinfile in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standinfile)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins
429 429
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Mirror the dirstate status of lfile's standin into lfdirstate.'''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # avoid shadowing the stat module with the dirstate tuple
        entry = repo.dirstate._map[lfstandin]
        state, mtime = entry[0], entry[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    else:
        # the remaining states map straight onto lfdirstate operations
        actions = {'m': lfdirstate.normallookup,
                   'r': lfdirstate.remove,
                   'a': lfdirstate.add,
                   '?': lfdirstate.drop}
        if state in actions:
            actions[state](lfile)
452 452
def markcommitted(orig, ctx, node):
    '''Post-commit wrapper: sync lfdirstate and populate the store for the
    freshly committed changeset.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)
476 476
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the largefile names whose (lfile, hash) standin entries differ
    between the two lists, without duplicates.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    # track seen names in a set: the old `f[0] not in filelist` list scan
    # made this quadratic in the number of changed standins
    filelist = []
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
484 484
def getlfilestoupload(repo, missing, addfunc):
    '''Call addfunc(standin, hash) for every largefile touched by the
    changesets listed in missing.'''
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
            unit=_('revisions'), total=len(missing))
        # use the repo[n].parents() API instead of poking at repo.changelog
        # directly (the unresolved diff had left both variants in place);
        # compare nodes, not contexts -- a changectx never compares equal
        # to the raw nullid bytes, so the old filter was a no-op
        parents = [p for p in repo[n].parents() if p.node() != node.nullid]

        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            # for merges, also consider files that differ against either
            # parent or were removed relative to one of them
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
516 516
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''
    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = match_.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        # only loop over largefiles that exist (not removed/renamed)
        for lfile in listlfiles(repo):
            if lfile in modifiedfiles and repo.wvfs.exists(standin(lfile)):
                # repo.wvfs.exists(lfile) can be False while a rebase is
                # in progress and the working copy is not updated yet
                if repo.wvfs.exists(lfile):
                    updatestandin(repo, standin(lfile))

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files.  It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed.  No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins.  Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user.  Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of standins,
    # because a removed largefile is gone from lfiles but its
    # standin was still walked above.
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove).  In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        # regular files keep the original verdict; standins of the
        # requested big files are accepted in place of the big files
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
618 618
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        # first commit after resuming: refresh standins once, then never again
        self.resuming = False
        return updatestandinsbymatch(repo, match)
639 639
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE
General Comments 0
You need to be logged in to leave comments. Login now