largefiles: rename match_ to matchmod import in lfutil
liscju
r29320:016a9015 default
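
The change is a mechanical rename of the import alias for mercurial's match module, bringing lfutil in line with the `<name>mod` aliasing used elsewhere in the codebase. A minimal sketch of a before/after call site (`repo` stands in for any local repository object):

# before this change
from mercurial import match as match_
m = match_.always(repo.root, repo.getcwd())

# after this change
from mercurial import match as matchmod
m = matchmod.always(repo.root, repo.getcwd())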
@@ -1,664 +1,664 @@
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import os
14 14 import platform
15 15 import stat
16 16
17 17 from mercurial.i18n import _
18 18
19 19 from mercurial import (
20 20 dirstate,
21 21 error,
22 22 httpconnection,
23 match as match_,
23 match as matchmod,
24 24 node,
25 25 scmutil,
26 26 util,
27 27 )
28 28
29 29 shortname = '.hglf'
30 30 shortnameslash = shortname + '/'
31 31 longname = 'largefiles'
32 32
33 33
34 34 # -- Private worker functions ------------------------------------------
35 35
36 36 def getminsize(ui, assumelfiles, opt, default=10):
37 37 lfsize = opt
38 38 if not lfsize and assumelfiles:
39 39 lfsize = ui.config(longname, 'minsize', default=default)
40 40 if lfsize:
41 41 try:
42 42 lfsize = float(lfsize)
43 43 except ValueError:
44 44 raise error.Abort(_('largefiles: size must be a number (not %s)\n')
45 45 % lfsize)
46 46 if lfsize is None:
47 47 raise error.Abort(_('minimum size for largefiles must be specified'))
48 48 return lfsize
49 49
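# Example outcomes, assuming no [largefiles] minsize setting in hgrc:
#   getminsize(ui, True, None)  -> 10.0 (falls back to the default)
#   getminsize(ui, False, '2')  -> 2.0  (explicit size option)
#   getminsize(ui, False, None) -> Abort: minimum size for largefiles
#                                  must be specified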
50 50 def link(src, dest):
51 51 """Try to create hardlink - if that fails, efficiently make a copy."""
52 52 util.makedirs(os.path.dirname(dest))
53 53 try:
54 54 util.oslink(src, dest)
55 55 except OSError:
56 56 # if hardlinks fail, fall back to an atomic copy
57 57 dst = util.atomictempfile(dest)
58 58 for chunk in util.filechunkiter(open(src, 'rb')):
59 59 dst.write(chunk)
60 60 dst.close()
61 61 os.chmod(dest, os.stat(src).st_mode)
62 62
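# A self-contained sketch of the same hardlink-then-copy fallback using only
# the stdlib (util.oslink and util.atomictempfile are the real helpers; this
# is illustrative, not the extension's API, and assumes a POSIX os.link):
import os
import shutil

def link_or_copy(src, dest):
    destdir = os.path.dirname(dest)
    if destdir and not os.path.isdir(destdir):
        os.makedirs(destdir)
    try:
        os.link(src, dest)  # hardlink: no data copied, fails across devices
    except OSError:
        shutil.copyfile(src, dest)  # fallback: plain copy of the contents
        os.chmod(dest, os.stat(src).st_mode)  # preserve the execute bit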
63 63 def usercachepath(ui, hash):
64 64 '''Return the correct location in the "global" largefiles cache for a file
65 65 with the given hash.
66 66 This cache is used for sharing of largefiles across repositories - both
67 67 to preserve download bandwidth and storage space.'''
68 68 return os.path.join(_usercachedir(ui), hash)
69 69
70 70 def _usercachedir(ui):
71 71 '''Return the location of the "global" largefiles cache.'''
72 72 path = ui.configpath(longname, 'usercache', None)
73 73 if path:
74 74 return path
75 75 if os.name == 'nt':
76 76 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
77 77 if appdata:
78 78 return os.path.join(appdata, longname)
79 79 elif platform.system() == 'Darwin':
80 80 home = os.getenv('HOME')
81 81 if home:
82 82 return os.path.join(home, 'Library', 'Caches', longname)
83 83 elif os.name == 'posix':
84 84 path = os.getenv('XDG_CACHE_HOME')
85 85 if path:
86 86 return os.path.join(path, longname)
87 87 home = os.getenv('HOME')
88 88 if home:
89 89 return os.path.join(home, '.cache', longname)
90 90 else:
91 91 raise error.Abort(_('unknown operating system: %s\n') % os.name)
92 92 raise error.Abort(_('unknown %s usercache location\n') % longname)
93 93
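# For illustration, the defaults _usercachedir resolves to (assuming no
# largefiles.usercache override and the usual environment variables set):
#   Windows: %LOCALAPPDATA%\largefiles (or %APPDATA%\largefiles)
#   Darwin:  $HOME/Library/Caches/largefiles
#   POSIX:   $XDG_CACHE_HOME/largefiles (or $HOME/.cache/largefiles)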
94 94 def inusercache(ui, hash):
95 95 path = usercachepath(ui, hash)
96 96 return os.path.exists(path)
97 97
98 98 def findfile(repo, hash):
99 99 '''Return store path of the largefile with the specified hash.
100 100 As a side effect, the file might be linked from user cache.
101 101 Return None if the file can't be found locally.'''
102 102 path, exists = findstorepath(repo, hash)
103 103 if exists:
104 104 repo.ui.note(_('found %s in store\n') % hash)
105 105 return path
106 106 elif inusercache(repo.ui, hash):
107 107 repo.ui.note(_('found %s in system cache\n') % hash)
108 108 path = storepath(repo, hash)
109 109 link(usercachepath(repo.ui, hash), path)
110 110 return path
111 111 return None
112 112
113 113 class largefilesdirstate(dirstate.dirstate):
114 114 def __getitem__(self, key):
115 115 return super(largefilesdirstate, self).__getitem__(unixpath(key))
116 116 def normal(self, f):
117 117 return super(largefilesdirstate, self).normal(unixpath(f))
118 118 def remove(self, f):
119 119 return super(largefilesdirstate, self).remove(unixpath(f))
120 120 def add(self, f):
121 121 return super(largefilesdirstate, self).add(unixpath(f))
122 122 def drop(self, f):
123 123 return super(largefilesdirstate, self).drop(unixpath(f))
124 124 def forget(self, f):
125 125 return super(largefilesdirstate, self).forget(unixpath(f))
126 126 def normallookup(self, f):
127 127 return super(largefilesdirstate, self).normallookup(unixpath(f))
128 128 def _ignore(self, f):
129 129 return False
130 130 def write(self, tr=False):
131 131 # (1) disable PENDING mode always
132 132 # (lfdirstate isn't yet managed as a part of the transaction)
133 133 # (2) avoid develwarn 'use dirstate.write with ....'
134 134 super(largefilesdirstate, self).write(None)
135 135
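# Every override above funnels its path through unixpath() (defined further
# down), so callers may pass OS-native paths, e.g. on Windows:
#   unixpath(r'sub\big.bin') -> 'sub/big.bin'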
136 136 def openlfdirstate(ui, repo, create=True):
137 137 '''
138 138 Return a dirstate object that tracks largefiles: i.e. its root is
139 139 the repo root, but it is saved in .hg/largefiles/dirstate.
140 140 '''
141 141 vfs = repo.vfs
142 142 lfstoredir = longname
143 143 opener = scmutil.opener(vfs.join(lfstoredir))
144 144 lfdirstate = largefilesdirstate(opener, ui, repo.root,
145 145 repo.dirstate._validate)
146 146
147 147 # If the largefiles dirstate does not exist, populate and create
148 148 # it. This ensures that we create it on the first meaningful
149 149 # largefiles operation in a new clone.
150 150 if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
151 151 matcher = getstandinmatcher(repo)
152 152 standins = repo.dirstate.walk(matcher, [], False, False)
153 153
154 154 if len(standins) > 0:
155 155 vfs.makedirs(lfstoredir)
156 156
157 157 for standin in standins:
158 158 lfile = splitstandin(standin)
159 159 lfdirstate.normallookup(lfile)
160 160 return lfdirstate
161 161
162 162 def lfdirstatestatus(lfdirstate, repo):
163 163 wctx = repo['.']
164 match = match_.always(repo.root, repo.getcwd())
164 match = matchmod.always(repo.root, repo.getcwd())
165 165 unsure, s = lfdirstate.status(match, [], False, False, False)
166 166 modified, clean = s.modified, s.clean
167 167 for lfile in unsure:
168 168 try:
169 169 fctx = wctx[standin(lfile)]
170 170 except LookupError:
171 171 fctx = None
172 172 if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
173 173 modified.append(lfile)
174 174 else:
175 175 clean.append(lfile)
176 176 lfdirstate.normal(lfile)
177 177 return s
178 178
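# Files reported "unsure" by the dirstate are settled by content here: the
# hash recorded in the standin is compared with a fresh hash of the working
# copy file, so a touched-but-unchanged largefile still ends up in s.clean.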
179 179 def listlfiles(repo, rev=None, matcher=None):
180 180 '''return a list of largefiles in the working copy or the
181 181 specified changeset'''
182 182
183 183 if matcher is None:
184 184 matcher = getstandinmatcher(repo)
185 185
186 186 # ignore unknown files in working directory
187 187 return [splitstandin(f)
188 188 for f in repo[rev].walk(matcher)
189 189 if rev is not None or repo.dirstate[f] != '?']
190 190
191 191 def instore(repo, hash, forcelocal=False):
192 192 '''Return true if a largefile with the given hash exists in the
193 193 store (local or shared).'''
194 194 return os.path.exists(storepath(repo, hash, forcelocal))
195 195
196 196 def storepath(repo, hash, forcelocal=False):
197 197 '''Return the correct location in the repository largefiles cache for a
198 198 file with the given hash.'''
199 199 if not forcelocal and repo.shared():
200 200 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
201 201 return repo.join(longname, hash)
202 202
203 203 def findstorepath(repo, hash):
204 204 '''Search through the local store path(s) to find the file for the given
205 205 hash. If the file is not found, its path in the primary store is returned.
206 206 The return value is a tuple of (path, exists(path)).
207 207 '''
208 208 # For shared repos, the primary store is in the share source. But for
209 209 # backward compatibility, force a lookup in the local store if it wasn't
210 210 # found in the share source.
211 211 path = storepath(repo, hash, False)
212 212
213 213 if instore(repo, hash):
214 214 return (path, True)
215 215 elif repo.shared() and instore(repo, hash, True):
216 216 return (storepath(repo, hash, True), True)
217 217
218 218 return (path, False)
219 219
220 220 def copyfromcache(repo, hash, filename):
221 221 '''Copy the specified largefile from the repo or system cache to
222 222 filename in the repository. Return true on success or false if the
223 223 file was not found in either cache (which should not happen:
224 224 this is meant to be called only after ensuring that the needed
225 225 largefile exists in the cache).'''
226 226 wvfs = repo.wvfs
227 227 path = findfile(repo, hash)
228 228 if path is None:
229 229 return False
230 230 wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
231 231 # The write may fail before the file is fully written, but we
232 232 # don't use atomic writes in the working copy.
233 233 with open(path, 'rb') as srcfd:
234 234 with wvfs(filename, 'wb') as destfd:
235 235 gothash = copyandhash(srcfd, destfd)
236 236 if gothash != hash:
237 237 repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
238 238 % (filename, path, gothash))
239 239 wvfs.unlink(filename)
240 240 return False
241 241 return True
242 242
243 243 def copytostore(repo, rev, file, uploaded=False):
244 244 wvfs = repo.wvfs
245 245 hash = readstandin(repo, file, rev)
246 246 if instore(repo, hash):
247 247 return
248 248 if wvfs.exists(file):
249 249 copytostoreabsolute(repo, wvfs.join(file), hash)
250 250 else:
251 251 repo.ui.warn(_("%s: largefile %s not available from local store\n") %
252 252 (file, hash))
253 253
254 254 def copyalltostore(repo, node):
255 255 '''Copy all largefiles in a given revision to the store'''
256 256
257 257 ctx = repo[node]
258 258 for filename in ctx.files():
259 259 if isstandin(filename) and filename in ctx.manifest():
260 260 realfile = splitstandin(filename)
261 261 copytostore(repo, ctx.node(), realfile)
262 262
263 263
264 264 def copytostoreabsolute(repo, file, hash):
265 265 if inusercache(repo.ui, hash):
266 266 link(usercachepath(repo.ui, hash), storepath(repo, hash))
267 267 else:
268 268 util.makedirs(os.path.dirname(storepath(repo, hash)))
269 269 dst = util.atomictempfile(storepath(repo, hash),
270 270 createmode=repo.store.createmode)
271 271 for chunk in util.filechunkiter(open(file, 'rb')):
272 272 dst.write(chunk)
273 273 dst.close()
274 274 linktousercache(repo, hash)
275 275
276 276 def linktousercache(repo, hash):
277 277 '''Link / copy the largefile with the specified hash from the store
278 278 to the cache.'''
279 279 path = usercachepath(repo.ui, hash)
280 280 link(storepath(repo, hash), path)
281 281
282 282 def getstandinmatcher(repo, rmatcher=None):
283 283 '''Return a match object that applies rmatcher to the standin directory'''
284 284 wvfs = repo.wvfs
285 285 standindir = shortname
286 286
287 287 # no warnings about missing files or directories
288 288 badfn = lambda f, msg: None
289 289
290 290 if rmatcher and not rmatcher.always():
291 291 pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
292 292 if not pats:
293 293 pats = [wvfs.join(standindir)]
294 294 match = scmutil.match(repo[None], pats, badfn=badfn)
295 295 # if pats is empty, it would incorrectly always match, so clear _always
296 296 match._always = False
297 297 else:
298 298 # no patterns: relative to repo root
299 299 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
300 300 return match
301 301
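# E.g. if rmatcher matches ['sub/big.bin'], the rewritten patterns are the
# standin copies under the working directory: <repo.root>/.hglf/sub/big.bin.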
302 302 def composestandinmatcher(repo, rmatcher):
303 303 '''Return a matcher that accepts standins corresponding to the
304 304 files accepted by rmatcher. Pass the list of files in the matcher
305 305 as the paths specified by the user.'''
306 306 smatcher = getstandinmatcher(repo, rmatcher)
307 307 isstandin = smatcher.matchfn
308 308 def composedmatchfn(f):
309 309 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
310 310 smatcher.matchfn = composedmatchfn
311 311
312 312 return smatcher
313 313
314 314 def standin(filename):
315 315 '''Return the repo-relative path to the standin for the specified big
316 316 file.'''
317 317 # Notes:
318 318 # 1) Some callers want an absolute path, but for instance addlargefiles
319 319 # needs it repo-relative so it can be passed to repo[None].add(). So
320 320 # leave it up to the caller to use repo.wjoin() to get an absolute path.
321 321 # 2) Join with '/' because that's what dirstate always uses, even on
322 322 # Windows. Change existing separator to '/' first in case we are
323 323 # passed filenames from an external source (like the command line).
324 324 return shortnameslash + util.pconvert(filename)
325 325
326 326 def isstandin(filename):
327 327 '''Return true if filename is a big file standin. filename must be
328 328 in Mercurial's internal form (slash-separated).'''
329 329 return filename.startswith(shortnameslash)
330 330
331 331 def splitstandin(filename):
332 332 # Split on / because that's what dirstate always uses, even on Windows.
333 333 # Change local separator to / first just in case we are passed filenames
334 334 # from an external source (like the command line).
335 335 bits = util.pconvert(filename).split('/', 1)
336 336 if len(bits) == 2 and bits[0] == shortname:
337 337 return bits[1]
338 338 else:
339 339 return None
340 340
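# standin() and splitstandin() are inverses on slash-normalized names:
#   standin('sub/big.bin')            -> '.hglf/sub/big.bin'
#   splitstandin('.hglf/sub/big.bin') -> 'sub/big.bin'
#   splitstandin('sub/big.bin')       -> None (not under the standin dir)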
341 341 def updatestandin(repo, standin):
342 342 file = repo.wjoin(splitstandin(standin))
343 343 if repo.wvfs.exists(splitstandin(standin)):
344 344 hash = hashfile(file)
345 345 executable = getexecutable(file)
346 346 writestandin(repo, standin, hash, executable)
347 347 else:
348 348 raise error.Abort(_('%s: file not found!') % splitstandin(standin))
349 349
350 350 def readstandin(repo, filename, node=None):
351 351 '''read hex hash from standin for filename at given node, or working
352 352 directory if no node is given'''
353 353 return repo[node][standin(filename)].data().strip()
354 354
355 355 def writestandin(repo, standin, hash, executable):
356 356 '''write hash to <repo.root>/<standin>'''
357 357 repo.wwrite(standin, hash + '\n', executable and 'x' or '')
358 358
359 359 def copyandhash(instream, outfile):
360 360 '''Read bytes from instream (iterable) and write them to outfile,
361 361 computing the SHA-1 hash of the data along the way. Return the hash.'''
362 362 hasher = util.sha1('')
363 363 for data in instream:
364 364 hasher.update(data)
365 365 outfile.write(data)
366 366 return hasher.hexdigest()
367 367
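# The same hash-while-copying pattern with only the stdlib (util.sha1 is
# essentially hashlib.sha1); a sketch, not the extension's API:
import hashlib

def copy_and_hash(instream, outfile):
    hasher = hashlib.sha1()
    for data in instream:  # any iterable of byte chunks
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()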
368 368 def hashrepofile(repo, file):
369 369 return hashfile(repo.wjoin(file))
370 370
371 371 def hashfile(file):
372 372 if not os.path.exists(file):
373 373 return ''
374 374 hasher = util.sha1('')
375 375 fd = open(file, 'rb')
376 376 for data in util.filechunkiter(fd, 128 * 1024):
377 377 hasher.update(data)
378 378 fd.close()
379 379 return hasher.hexdigest()
380 380
381 381 def getexecutable(filename):
382 382 mode = os.stat(filename).st_mode
383 383 return ((mode & stat.S_IXUSR) and
384 384 (mode & stat.S_IXGRP) and
385 385 (mode & stat.S_IXOTH))
386 386
387 387 def urljoin(first, second, *arg):
388 388 def join(left, right):
389 389 if not left.endswith('/'):
390 390 left += '/'
391 391 if right.startswith('/'):
392 392 right = right[1:]
393 393 return left + right
394 394
395 395 url = join(first, second)
396 396 for a in arg:
397 397 url = join(url, a)
398 398 return url
399 399
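# Example behavior -- exactly one slash survives at each join point:
#   urljoin('http://host/', '/base', 'path/') -> 'http://host/base/path/'
#   urljoin('http://host', 'base')            -> 'http://host/base'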
400 400 def hexsha1(data):
401 401 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
402 402 object data"""
403 403 h = util.sha1()
404 404 for chunk in util.filechunkiter(data):
405 405 h.update(chunk)
406 406 return h.hexdigest()
407 407
408 408 def httpsendfile(ui, filename):
409 409 return httpconnection.httpsendfile(ui, filename, 'rb')
410 410
411 411 def unixpath(path):
412 412 '''Return a version of path normalized for use with the lfdirstate.'''
413 413 return util.pconvert(os.path.normpath(path))
414 414
415 415 def islfilesrepo(repo):
416 416 '''Return true if the repo is a largefile repo.'''
417 417 if ('largefiles' in repo.requirements and
418 418 any(shortnameslash in f[0] for f in repo.store.datafiles())):
419 419 return True
420 420
421 421 return any(openlfdirstate(repo.ui, repo, False))
422 422
423 423 class storeprotonotcapable(Exception):
424 424 def __init__(self, storetypes):
425 425 self.storetypes = storetypes
426 426
427 427 def getstandinsstate(repo):
428 428 standins = []
429 429 matcher = getstandinmatcher(repo)
430 430 for standin in repo.dirstate.walk(matcher, [], False, False):
431 431 lfile = splitstandin(standin)
432 432 try:
433 433 hash = readstandin(repo, lfile)
434 434 except IOError:
435 435 hash = None
436 436 standins.append((lfile, hash))
437 437 return standins
438 438
439 439 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
440 440 lfstandin = standin(lfile)
441 441 if lfstandin in repo.dirstate:
442 442 stat = repo.dirstate._map[lfstandin]
443 443 state, mtime = stat[0], stat[3]
444 444 else:
445 445 state, mtime = '?', -1
446 446 if state == 'n':
447 447 if (normallookup or mtime < 0 or
448 448 not repo.wvfs.exists(lfile)):
449 449 # state 'n' doesn't ensure 'clean' in this case
450 450 lfdirstate.normallookup(lfile)
451 451 else:
452 452 lfdirstate.normal(lfile)
453 453 elif state == 'm':
454 454 lfdirstate.normallookup(lfile)
455 455 elif state == 'r':
456 456 lfdirstate.remove(lfile)
457 457 elif state == 'a':
458 458 lfdirstate.add(lfile)
459 459 elif state == '?':
460 460 lfdirstate.drop(lfile)
461 461
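# Mapping applied above, standin dirstate entry -> largefiles dirstate call:
#   'n' with valid mtime          -> normal(lfile)
#   'n' with mtime -1 or no file  -> normallookup(lfile) (recheck next status)
#   'm' (merged)                  -> normallookup(lfile)
#   'r' (removed)                 -> remove(lfile)
#   'a' (added)                   -> add(lfile)
#   '?' (untracked)               -> drop(lfile)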
462 462 def markcommitted(orig, ctx, node):
463 463 repo = ctx.repo()
464 464
465 465 orig(node)
466 466
467 467 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
468 468 # because files coming from the 2nd parent are omitted in the latter.
469 469 #
470 470 # The former should be used to get targets of "synclfdirstate",
471 471 # because such files:
472 472 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
473 473 # - have to be marked as "n" after commit, but
474 474 # - aren't listed in "repo[node].files()"
475 475
476 476 lfdirstate = openlfdirstate(repo.ui, repo)
477 477 for f in ctx.files():
478 478 if isstandin(f):
479 479 lfile = splitstandin(f)
480 480 synclfdirstate(repo, lfdirstate, lfile, False)
481 481 lfdirstate.write()
482 482
483 483 # As part of committing, copy all of the largefiles into the cache.
484 484 copyalltostore(repo, node)
485 485
486 486 def getlfilestoupdate(oldstandins, newstandins):
487 487 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
488 488 filelist = []
489 489 for f in changedstandins:
490 490 if f[0] not in filelist:
491 491 filelist.append(f[0])
492 492 return filelist
493 493
494 494 def getlfilestoupload(repo, missing, addfunc):
495 495 for i, n in enumerate(missing):
496 496 repo.ui.progress(_('finding outgoing largefiles'), i,
497 497 unit=_('revisions'), total=len(missing))
498 498 parents = [p for p in repo[n].parents() if p != node.nullid]
499 499
500 500 oldlfstatus = repo.lfstatus
501 501 repo.lfstatus = False
502 502 try:
503 503 ctx = repo[n]
504 504 finally:
505 505 repo.lfstatus = oldlfstatus
506 506
507 507 files = set(ctx.files())
508 508 if len(parents) == 2:
509 509 mc = ctx.manifest()
510 510 mp1 = ctx.parents()[0].manifest()
511 511 mp2 = ctx.parents()[1].manifest()
512 512 for f in mp1:
513 513 if f not in mc:
514 514 files.add(f)
515 515 for f in mp2:
516 516 if f not in mc:
517 517 files.add(f)
518 518 for f in mc:
519 519 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
520 520 files.add(f)
521 521 for fn in files:
522 522 if isstandin(fn) and fn in ctx:
523 523 addfunc(fn, ctx[fn].data().strip())
524 524 repo.ui.progress(_('finding outgoing largefiles'), None)
525 525
526 526 def updatestandinsbymatch(repo, match):
527 527 '''Update standins in the working directory according to specified match
528 528
529 529 This returns (possibly modified) ``match`` object to be used for
530 530 subsequent commit process.
531 531 '''
532 532
533 533 ui = repo.ui
534 534
535 535 # Case 1: user calls commit with no specific files or
536 536 # include/exclude patterns: refresh and commit all files that
537 537 # are "dirty".
538 538 if match is None or match.always():
539 539 # Spend a bit of time here to get a list of files we know
540 540 # are modified so we can compare only against those.
541 541 # It can cost a lot of time (several seconds)
542 542 # otherwise to update all standins if the largefiles are
543 543 # large.
544 544 lfdirstate = openlfdirstate(ui, repo)
545 dirtymatch = match_.always(repo.root, repo.getcwd())
545 dirtymatch = matchmod.always(repo.root, repo.getcwd())
546 546 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
547 547 False)
548 548 modifiedfiles = unsure + s.modified + s.added + s.removed
549 549 lfiles = listlfiles(repo)
550 550 # this only loops through largefiles that exist (not
551 551 # removed/renamed)
552 552 for lfile in lfiles:
553 553 if lfile in modifiedfiles:
554 554 if repo.wvfs.exists(standin(lfile)):
555 555 # this handles the case where a rebase is being
556 556 # performed and the working copy is not updated
557 557 # yet.
558 558 if repo.wvfs.exists(lfile):
559 559 updatestandin(repo,
560 560 standin(lfile))
561 561
562 562 return match
563 563
564 564 lfiles = listlfiles(repo)
565 565 match._files = repo._subdirlfs(match.files(), lfiles)
566 566
567 567 # Case 2: user calls commit with specified patterns: refresh
568 568 # any matching big files.
569 569 smatcher = composestandinmatcher(repo, match)
570 570 standins = repo.dirstate.walk(smatcher, [], False, False)
571 571
572 572 # No matching big files: get out of the way and pass control to
573 573 # the usual commit() method.
574 574 if not standins:
575 575 return match
576 576
577 577 # Refresh all matching big files. It's possible that the
578 578 # commit will end up failing, in which case the big files will
579 579 # stay refreshed. No harm done: the user modified them and
580 580 # asked to commit them, so sooner or later we're going to
581 581 # refresh the standins. Might as well leave them refreshed.
582 582 lfdirstate = openlfdirstate(ui, repo)
583 583 for fstandin in standins:
584 584 lfile = splitstandin(fstandin)
585 585 if lfdirstate[lfile] != 'r':
586 586 updatestandin(repo, fstandin)
587 587
588 588 # Cook up a new matcher that only matches regular files or
589 589 # standins corresponding to the big files requested by the
590 590 # user. Have to modify _files to prevent commit() from
591 591 # complaining "not tracked" for big files.
592 592 match = copy.copy(match)
593 593 origmatchfn = match.matchfn
594 594
595 595 # Check both the list of largefiles and the list of
596 596 # standins because if a largefile was removed, it
597 597 # won't be in the list of largefiles at this point
598 598 match._files += sorted(standins)
599 599
600 600 actualfiles = []
601 601 for f in match._files:
602 602 fstandin = standin(f)
603 603
604 604 # For largefiles, only one of the normal and standin should be
605 605 # committed (except if one of them is a remove). In the case of a
606 606 # standin removal, drop the normal file if it is unknown to dirstate.
607 607 # Thus, skip plain largefile names but keep the standin.
608 608 if f in lfiles or fstandin in standins:
609 609 if repo.dirstate[fstandin] != 'r':
610 610 if repo.dirstate[f] != 'r':
611 611 continue
612 612 elif repo.dirstate[f] == '?':
613 613 continue
614 614
615 615 actualfiles.append(f)
616 616 match._files = actualfiles
617 617
618 618 def matchfn(f):
619 619 if origmatchfn(f):
620 620 return f not in lfiles
621 621 else:
622 622 return f in standins
623 623
624 624 match.matchfn = matchfn
625 625
626 626 return match
627 627
628 628 class automatedcommithook(object):
629 629 '''Stateful hook to update standins at the 1st commit of resuming
630 630
631 631 For efficiency, updating standins in the working directory should
632 632 be avoided while automated committing (like rebase, transplant and
633 633 so on), because they should be updated before committing.
634 634
635 635 But the 1st commit of resuming automated committing (e.g. ``rebase
636 636 --continue``) should update them, because largefiles may be
637 637 modified manually.
638 638 '''
639 639 def __init__(self, resuming):
640 640 self.resuming = resuming
641 641
642 642 def __call__(self, repo, match):
643 643 if self.resuming:
644 644 self.resuming = False # avoids updating at subsequent commits
645 645 return updatestandinsbymatch(repo, match)
646 646 else:
647 647 return match
648 648
649 649 def getstatuswriter(ui, repo, forcibly=None):
650 650 '''Return the function to write largefiles specific status out
651 651
652 652 If ``forcibly`` is ``None``, this returns the last element of
653 653 ``repo._lfstatuswriters`` as "default" writer function.
654 654
655 655 Otherwise, this returns the function to always write out (or
656 656 ignore if ``not forcibly``) status.
657 657 '''
658 658 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
659 659 return repo._lfstatuswriters[-1]
660 660 else:
661 661 if forcibly:
662 662 return ui.status # forcibly WRITE OUT
663 663 else:
664 664 return lambda *msg, **opts: None # forcibly IGNORE