doc: trim newline at the end of exception message
FUJIWARA Katsunori
r29644:ce4ac5d1 stable
@@ -1,662 +1,662 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import hashlib
14 14 import os
15 15 import platform
16 16 import stat
17 17
18 18 from mercurial.i18n import _
19 19
20 20 from mercurial import (
21 21 dirstate,
22 22 error,
23 23 httpconnection,
24 24 match as matchmod,
25 25 node,
26 26 scmutil,
27 27 util,
28 28 )
29 29
30 30 shortname = '.hglf'
31 31 shortnameslash = shortname + '/'
32 32 longname = 'largefiles'
33 33
34 34 # -- Private worker functions ------------------------------------------
35 35
36 36 def getminsize(ui, assumelfiles, opt, default=10):
37 37 lfsize = opt
38 38 if not lfsize and assumelfiles:
39 39 lfsize = ui.config(longname, 'minsize', default=default)
40 40 if lfsize:
41 41 try:
42 42 lfsize = float(lfsize)
43 43 except ValueError:
44 44 raise error.Abort(_('largefiles: size must be number (not %s)\n')
45 45 % lfsize)
46 46 if lfsize is None:
47 47 raise error.Abort(_('minimum size for largefiles must be specified'))
48 48 return lfsize
49 49
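
A minimal standalone sketch of the size-resolution order above (hypothetical names, no Mercurial imports): an explicit option wins, then the config value, then the default, and a non-numeric value is rejected.

    import sys

    def resolve_minsize(opt, config_value, default=10):
        size = opt or config_value or default
        try:
            return float(size)
        except ValueError:
            sys.exit('largefiles: size must be a number (not %s)' % size)

    assert resolve_minsize('3', '25') == 3.0    # explicit option wins
    assert resolve_minsize(None, '25') == 25.0  # then the config value
    assert resolve_minsize(None, None) == 10.0  # then the default
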
50 50 def link(src, dest):
51 51 """Try to create hardlink - if that fails, efficiently make a copy."""
52 52 util.makedirs(os.path.dirname(dest))
53 53 try:
54 54 util.oslink(src, dest)
55 55 except OSError:
56 56 # if hardlinks fail, fallback on atomic copy
57 57 dst = util.atomictempfile(dest)
58 58 for chunk in util.filechunkiter(open(src, 'rb')):
59 59 dst.write(chunk)
60 60 dst.close()
61 61 os.chmod(dest, os.stat(src).st_mode)
62 62
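
The same hardlink-or-copy fallback as a plain-Python sketch (shutil.copy2 stands in for Mercurial's atomic temp file; names are illustrative):

    import os, shutil

    def link_or_copy(src, dest):
        dirname = os.path.dirname(dest)
        if dirname and not os.path.isdir(dirname):
            os.makedirs(dirname)
        try:
            os.link(src, dest)       # hardlink: instant, no extra disk space
        except OSError:
            shutil.copy2(src, dest)  # cross-device link or unsupported FS
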
63 63 def usercachepath(ui, hash):
64 64 '''Return the correct location in the "global" largefiles cache for a file
65 65 with the given hash.
66 66 This cache is used for sharing of largefiles across repositories - both
67 67 to preserve download bandwidth and storage space.'''
68 68 return os.path.join(_usercachedir(ui), hash)
69 69
70 70 def _usercachedir(ui):
71 71 '''Return the location of the "global" largefiles cache.'''
72 72 path = ui.configpath(longname, 'usercache', None)
73 73 if path:
74 74 return path
75 75 if os.name == 'nt':
76 76 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
77 77 if appdata:
78 78 return os.path.join(appdata, longname)
79 79 elif platform.system() == 'Darwin':
80 80 home = os.getenv('HOME')
81 81 if home:
82 82 return os.path.join(home, 'Library', 'Caches', longname)
83 83 elif os.name == 'posix':
84 84 path = os.getenv('XDG_CACHE_HOME')
85 85 if path:
86 86 return os.path.join(path, longname)
87 87 home = os.getenv('HOME')
88 88 if home:
89 89 return os.path.join(home, '.cache', longname)
90 90 else:
91 91 raise error.Abort(_('unknown operating system: %s\n') % os.name)
92    - raise error.Abort(_('unknown %s usercache location\n') % longname)
   92 + raise error.Abort(_('unknown %s usercache location') % longname)
93 93
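
The same per-platform cache-directory resolution order, as a self-contained sketch (an illustrative helper, not the Mercurial API):

    import os, platform

    def user_cache_dir(appname):
        if os.name == 'nt':  # Windows: LOCALAPPDATA, then APPDATA
            base = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            return os.path.join(base, appname) if base else None
        if platform.system() == 'Darwin':  # macOS: ~/Library/Caches
            home = os.getenv('HOME')
            return os.path.join(home, 'Library', 'Caches', appname) if home else None
        xdg = os.getenv('XDG_CACHE_HOME')  # other POSIX: XDG, then ~/.cache
        if xdg:
            return os.path.join(xdg, appname)
        home = os.getenv('HOME')
        return os.path.join(home, '.cache', appname) if home else None
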
94 94 def inusercache(ui, hash):
95 95 path = usercachepath(ui, hash)
96 96 return os.path.exists(path)
97 97
98 98 def findfile(repo, hash):
99 99 '''Return store path of the largefile with the specified hash.
100 100 As a side effect, the file might be linked from user cache.
101 101 Return None if the file can't be found locally.'''
102 102 path, exists = findstorepath(repo, hash)
103 103 if exists:
104 104 repo.ui.note(_('found %s in store\n') % hash)
105 105 return path
106 106 elif inusercache(repo.ui, hash):
107 107 repo.ui.note(_('found %s in system cache\n') % hash)
108 108 path = storepath(repo, hash)
109 109 link(usercachepath(repo.ui, hash), path)
110 110 return path
111 111 return None
112 112
113 113 class largefilesdirstate(dirstate.dirstate):
114 114 def __getitem__(self, key):
115 115 return super(largefilesdirstate, self).__getitem__(unixpath(key))
116 116 def normal(self, f):
117 117 return super(largefilesdirstate, self).normal(unixpath(f))
118 118 def remove(self, f):
119 119 return super(largefilesdirstate, self).remove(unixpath(f))
120 120 def add(self, f):
121 121 return super(largefilesdirstate, self).add(unixpath(f))
122 122 def drop(self, f):
123 123 return super(largefilesdirstate, self).drop(unixpath(f))
124 124 def forget(self, f):
125 125 return super(largefilesdirstate, self).forget(unixpath(f))
126 126 def normallookup(self, f):
127 127 return super(largefilesdirstate, self).normallookup(unixpath(f))
128 128 def _ignore(self, f):
129 129 return False
130 130 def write(self, tr=False):
131 131 # (1) disable PENDING mode always
132 132 # (lfdirstate isn't yet managed as a part of the transaction)
133 133 # (2) avoid develwarn 'use dirstate.write with ....'
134 134 super(largefilesdirstate, self).write(None)
135 135
136 136 def openlfdirstate(ui, repo, create=True):
137 137 '''
138 138 Return a dirstate object that tracks largefiles: i.e. its root is
139 139 the repo root, but it is saved in .hg/largefiles/dirstate.
140 140 '''
141 141 vfs = repo.vfs
142 142 lfstoredir = longname
143 143 opener = scmutil.opener(vfs.join(lfstoredir))
144 144 lfdirstate = largefilesdirstate(opener, ui, repo.root,
145 145 repo.dirstate._validate)
146 146
147 147 # If the largefiles dirstate does not exist, populate and create
148 148 # it. This ensures that we create it on the first meaningful
149 149 # largefiles operation in a new clone.
150 150 if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
151 151 matcher = getstandinmatcher(repo)
152 152 standins = repo.dirstate.walk(matcher, [], False, False)
153 153
154 154 if len(standins) > 0:
155 155 vfs.makedirs(lfstoredir)
156 156
157 157 for standin in standins:
158 158 lfile = splitstandin(standin)
159 159 lfdirstate.normallookup(lfile)
160 160 return lfdirstate
161 161
162 162 def lfdirstatestatus(lfdirstate, repo):
163 163 wctx = repo['.']
164 164 match = matchmod.always(repo.root, repo.getcwd())
165 165 unsure, s = lfdirstate.status(match, [], False, False, False)
166 166 modified, clean = s.modified, s.clean
167 167 for lfile in unsure:
168 168 try:
169 169 fctx = wctx[standin(lfile)]
170 170 except LookupError:
171 171 fctx = None
172 172 if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
173 173 modified.append(lfile)
174 174 else:
175 175 clean.append(lfile)
176 176 lfdirstate.normal(lfile)
177 177 return s
178 178
179 179 def listlfiles(repo, rev=None, matcher=None):
180 180 '''return a list of largefiles in the working copy or the
181 181 specified changeset'''
182 182
183 183 if matcher is None:
184 184 matcher = getstandinmatcher(repo)
185 185
186 186 # ignore unknown files in working directory
187 187 return [splitstandin(f)
188 188 for f in repo[rev].walk(matcher)
189 189 if rev is not None or repo.dirstate[f] != '?']
190 190
191 191 def instore(repo, hash, forcelocal=False):
192 192 '''Return true if a largefile with the given hash exists in the store'''
193 193 return os.path.exists(storepath(repo, hash, forcelocal))
194 194
195 195 def storepath(repo, hash, forcelocal=False):
196 196 '''Return the correct location in the repository largefiles store for a
197 197 file with the given hash.'''
198 198 if not forcelocal and repo.shared():
199 199 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
200 200 return repo.join(longname, hash)
201 201
202 202 def findstorepath(repo, hash):
203 203 '''Search through the local store path(s) to find the file for the given
204 204 hash. If the file is not found, its path in the primary store is returned.
205 205 The return value is a tuple of (path, exists(path)).
206 206 '''
207 207 # For shared repos, the primary store is in the share source. But for
208 208 # backward compatibility, force a lookup in the local store if it wasn't
209 209 # found in the share source.
210 210 path = storepath(repo, hash, False)
211 211
212 212 if instore(repo, hash):
213 213 return (path, True)
214 214 elif repo.shared() and instore(repo, hash, True):
215 215 return storepath(repo, hash, True), True
216 216
217 217 return (path, False)
218 218
219 219 def copyfromcache(repo, hash, filename):
220 220 '''Copy the specified largefile from the repo or system cache to
221 221 filename in the repository. Return true on success or false if the
222 222 file was not found in either cache (which should not happen:
223 223 this is meant to be called only after ensuring that the needed
224 224 largefile exists in the cache).'''
225 225 wvfs = repo.wvfs
226 226 path = findfile(repo, hash)
227 227 if path is None:
228 228 return False
229 229 wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
230 230 # The write may fail before the file is fully written, but we
231 231 # don't use atomic writes in the working copy.
232 232 with open(path, 'rb') as srcfd:
233 233 with wvfs(filename, 'wb') as destfd:
234 234 gothash = copyandhash(srcfd, destfd)
235 235 if gothash != hash:
236 236 repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
237 237 % (filename, path, gothash))
238 238 wvfs.unlink(filename)
239 239 return False
240 240 return True
241 241
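
The copy-and-verify step above boils down to this pattern: stream the file, hash while copying, and discard the destination on a mismatch (a plain-Python sketch with hypothetical names):

    import hashlib, os

    def copy_verified(src, dest, expected_sha1):
        hasher = hashlib.sha1()
        with open(src, 'rb') as fsrc, open(dest, 'wb') as fdst:
            for chunk in iter(lambda: fsrc.read(128 * 1024), b''):
                hasher.update(chunk)
                fdst.write(chunk)
        if hasher.hexdigest() != expected_sha1:
            os.unlink(dest)  # corrupt data: remove it, as copyfromcache does
            return False
        return True
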
242 242 def copytostore(repo, rev, file, uploaded=False):
243 243 wvfs = repo.wvfs
244 244 hash = readstandin(repo, file, rev)
245 245 if instore(repo, hash):
246 246 return
247 247 if wvfs.exists(file):
248 248 copytostoreabsolute(repo, wvfs.join(file), hash)
249 249 else:
250 250 repo.ui.warn(_("%s: largefile %s not available from local store\n") %
251 251 (file, hash))
252 252
253 253 def copyalltostore(repo, node):
254 254 '''Copy all largefiles in a given revision to the store'''
255 255
256 256 ctx = repo[node]
257 257 for filename in ctx.files():
258 258 if isstandin(filename) and filename in ctx.manifest():
259 259 realfile = splitstandin(filename)
260 260 copytostore(repo, ctx.node(), realfile)
261 261
262 262 def copytostoreabsolute(repo, file, hash):
263 263 if inusercache(repo.ui, hash):
264 264 link(usercachepath(repo.ui, hash), storepath(repo, hash))
265 265 else:
266 266 util.makedirs(os.path.dirname(storepath(repo, hash)))
267 267 dst = util.atomictempfile(storepath(repo, hash),
268 268 createmode=repo.store.createmode)
269 269 for chunk in util.filechunkiter(open(file, 'rb')):
270 270 dst.write(chunk)
271 271 dst.close()
272 272 linktousercache(repo, hash)
273 273
274 274 def linktousercache(repo, hash):
275 275 '''Link / copy the largefile with the specified hash from the store
276 276 to the cache.'''
277 277 path = usercachepath(repo.ui, hash)
278 278 link(storepath(repo, hash), path)
279 279
280 280 def getstandinmatcher(repo, rmatcher=None):
281 281 '''Return a match object that applies rmatcher to the standin directory'''
282 282 wvfs = repo.wvfs
283 283 standindir = shortname
284 284
285 285 # no warnings about missing files or directories
286 286 badfn = lambda f, msg: None
287 287
288 288 if rmatcher and not rmatcher.always():
289 289 pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
290 290 if not pats:
291 291 pats = [wvfs.join(standindir)]
292 292 match = scmutil.match(repo[None], pats, badfn=badfn)
293 293 # if pats is empty, it would incorrectly always match, so clear _always
294 294 match._always = False
295 295 else:
296 296 # no patterns: relative to repo root
297 297 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
298 298 return match
299 299
300 300 def composestandinmatcher(repo, rmatcher):
301 301 '''Return a matcher that accepts standins corresponding to the
302 302 files accepted by rmatcher. Pass the list of files in the matcher
303 303 as the paths specified by the user.'''
304 304 smatcher = getstandinmatcher(repo, rmatcher)
305 305 isstandin = smatcher.matchfn
306 306 def composedmatchfn(f):
307 307 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
308 308 smatcher.matchfn = composedmatchfn
309 309
310 310 return smatcher
311 311
312 312 def standin(filename):
313 313 '''Return the repo-relative path to the standin for the specified big
314 314 file.'''
315 315 # Notes:
316 316 # 1) Some callers want an absolute path, but for instance addlargefiles
317 317 # needs it repo-relative so it can be passed to repo[None].add(). So
318 318 # leave it up to the caller to use repo.wjoin() to get an absolute path.
319 319 # 2) Join with '/' because that's what dirstate always uses, even on
320 320 # Windows. Change existing separator to '/' first in case we are
321 321 # passed filenames from an external source (like the command line).
322 322 return shortnameslash + util.pconvert(filename)
323 323
324 324 def isstandin(filename):
325 325 '''Return true if filename is a big file standin. filename must be
326 326 in Mercurial's internal form (slash-separated).'''
327 327 return filename.startswith(shortnameslash)
328 328
329 329 def splitstandin(filename):
330 330 # Split on / because that's what dirstate always uses, even on Windows.
331 331 # Change local separator to / first just in case we are passed filenames
332 332 # from an external source (like the command line).
333 333 bits = util.pconvert(filename).split('/', 1)
334 334 if len(bits) == 2 and bits[0] == shortname:
335 335 return bits[1]
336 336 else:
337 337 return None
338 338
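
Usage illustration for the standin mapping (plain-Python equivalents of standin() and splitstandin(); '.hglf' is the shortname defined above):

    def to_standin(filename):
        return '.hglf/' + filename.replace('\\', '/')

    def from_standin(path):
        bits = path.replace('\\', '/').split('/', 1)
        return bits[1] if len(bits) == 2 and bits[0] == '.hglf' else None

    assert to_standin('sub/big.bin') == '.hglf/sub/big.bin'
    assert from_standin('.hglf/sub/big.bin') == 'sub/big.bin'
    assert from_standin('sub/big.bin') is None  # not a standin
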
339 339 def updatestandin(repo, standin):
340 340 file = repo.wjoin(splitstandin(standin))
341 341 if repo.wvfs.exists(splitstandin(standin)):
342 342 hash = hashfile(file)
343 343 executable = getexecutable(file)
344 344 writestandin(repo, standin, hash, executable)
345 345 else:
346 346 raise error.Abort(_('%s: file not found!') % splitstandin(standin))
347 347
348 348 def readstandin(repo, filename, node=None):
349 349 '''read hex hash from standin for filename at given node, or working
350 350 directory if no node is given'''
351 351 return repo[node][standin(filename)].data().strip()
352 352
353 353 def writestandin(repo, standin, hash, executable):
354 354 '''write hash to <repo.root>/<standin>'''
355 355 repo.wwrite(standin, hash + '\n', executable and 'x' or '')
356 356
357 357 def copyandhash(instream, outfile):
358 358 '''Read bytes from instream (iterable) and write them to outfile,
359 359 computing the SHA-1 hash of the data along the way. Return the hash.'''
360 360 hasher = hashlib.sha1('')
361 361 for data in instream:
362 362 hasher.update(data)
363 363 outfile.write(data)
364 364 return hasher.hexdigest()
365 365
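
For example, feeding copyandhash an iterable of byte chunks copies them to the output and yields the SHA-1 of the whole stream (illustrative usage):

    import hashlib, io

    out = io.BytesIO()
    digest = copyandhash(iter([b'big', b'file']), out)
    assert out.getvalue() == b'bigfile'
    assert digest == hashlib.sha1(b'bigfile').hexdigest()
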
366 366 def hashrepofile(repo, file):
367 367 return hashfile(repo.wjoin(file))
368 368
369 369 def hashfile(file):
370 370 if not os.path.exists(file):
371 371 return ''
372 372 hasher = hashlib.sha1('')
373 373 fd = open(file, 'rb')
374 374 for data in util.filechunkiter(fd, 128 * 1024):
375 375 hasher.update(data)
376 376 fd.close()
377 377 return hasher.hexdigest()
378 378
379 379 def getexecutable(filename):
380 380 mode = os.stat(filename).st_mode
381 381 return ((mode & stat.S_IXUSR) and
382 382 (mode & stat.S_IXGRP) and
383 383 (mode & stat.S_IXOTH))
384 384
385 385 def urljoin(first, second, *arg):
386 386 def join(left, right):
387 387 if not left.endswith('/'):
388 388 left += '/'
389 389 if right.startswith('/'):
390 390 right = right[1:]
391 391 return left + right
392 392
393 393 url = join(first, second)
394 394 for a in arg:
395 395 url = join(url, a)
396 396 return url
397 397
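
urljoin guarantees exactly one '/' at each join point, however the pieces are delimited (illustrative usage):

    assert urljoin('http://host/', '/repo', 'file') == 'http://host/repo/file'
    assert urljoin('http://host', 'repo/') == 'http://host/repo/'
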
398 398 def hexsha1(data):
399 399 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
400 400 object data"""
401 401 h = hashlib.sha1()
402 402 for chunk in util.filechunkiter(data):
403 403 h.update(chunk)
404 404 return h.hexdigest()
405 405
406 406 def httpsendfile(ui, filename):
407 407 return httpconnection.httpsendfile(ui, filename, 'rb')
408 408
409 409 def unixpath(path):
410 410 '''Return a version of path normalized for use with the lfdirstate.'''
411 411 return util.pconvert(os.path.normpath(path))
412 412
413 413 def islfilesrepo(repo):
414 414 '''Return true if the repo is a largefile repo.'''
415 415 if ('largefiles' in repo.requirements and
416 416 any(shortnameslash in f[0] for f in repo.store.datafiles())):
417 417 return True
418 418
419 419 return any(openlfdirstate(repo.ui, repo, False))
420 420
421 421 class storeprotonotcapable(Exception):
422 422 def __init__(self, storetypes):
423 423 self.storetypes = storetypes
424 424
425 425 def getstandinsstate(repo):
426 426 standins = []
427 427 matcher = getstandinmatcher(repo)
428 428 for standin in repo.dirstate.walk(matcher, [], False, False):
429 429 lfile = splitstandin(standin)
430 430 try:
431 431 hash = readstandin(repo, lfile)
432 432 except IOError:
433 433 hash = None
434 434 standins.append((lfile, hash))
435 435 return standins
436 436
437 437 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
438 438 lfstandin = standin(lfile)
439 439 if lfstandin in repo.dirstate:
440 440 stat = repo.dirstate._map[lfstandin]
441 441 state, mtime = stat[0], stat[3]
442 442 else:
443 443 state, mtime = '?', -1
444 444 if state == 'n':
445 445 if (normallookup or mtime < 0 or
446 446 not repo.wvfs.exists(lfile)):
447 447 # state 'n' doesn't ensure 'clean' in this case
448 448 lfdirstate.normallookup(lfile)
449 449 else:
450 450 lfdirstate.normal(lfile)
451 451 elif state == 'm':
452 452 lfdirstate.normallookup(lfile)
453 453 elif state == 'r':
454 454 lfdirstate.remove(lfile)
455 455 elif state == 'a':
456 456 lfdirstate.add(lfile)
457 457 elif state == '?':
458 458 lfdirstate.drop(lfile)
459 459
460 460 def markcommitted(orig, ctx, node):
461 461 repo = ctx.repo()
462 462
463 463 orig(node)
464 464
465 465 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
466 466 # because files coming from the 2nd parent are omitted in the latter.
467 467 #
468 468 # The former should be used to get targets of "synclfdirstate",
469 469 # because such files:
470 470 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
471 471 # - have to be marked as "n" after commit, but
472 472 # - aren't listed in "repo[node].files()"
473 473
474 474 lfdirstate = openlfdirstate(repo.ui, repo)
475 475 for f in ctx.files():
476 476 if isstandin(f):
477 477 lfile = splitstandin(f)
478 478 synclfdirstate(repo, lfdirstate, lfile, False)
479 479 lfdirstate.write()
480 480
481 481 # As part of committing, copy all of the largefiles into the cache.
482 482 copyalltostore(repo, node)
483 483
484 484 def getlfilestoupdate(oldstandins, newstandins):
485 485 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
486 486 filelist = []
487 487 for f in changedstandins:
488 488 if f[0] not in filelist:
489 489 filelist.append(f[0])
490 490 return filelist
491 491
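
The symmetric difference picks up every (file, hash) pair present on only one side, so a changed hash and an added or removed standin both surface the filename (illustrative data):

    old = [('a.bin', 'h1'), ('b.bin', 'h2')]
    new = [('a.bin', 'h1'), ('b.bin', 'h3'), ('c.bin', 'h4')]
    changed = set(old).symmetric_difference(set(new))
    # b.bin appears twice (old and new hash); deduplicate on filename
    assert sorted({f for f, _ in changed}) == ['b.bin', 'c.bin']
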
492 492 def getlfilestoupload(repo, missing, addfunc):
493 493 for i, n in enumerate(missing):
494 494 repo.ui.progress(_('finding outgoing largefiles'), i,
495 495 unit=_('revisions'), total=len(missing))
496 496 parents = [p for p in repo[n].parents() if p != node.nullid]
497 497
498 498 oldlfstatus = repo.lfstatus
499 499 repo.lfstatus = False
500 500 try:
501 501 ctx = repo[n]
502 502 finally:
503 503 repo.lfstatus = oldlfstatus
504 504
505 505 files = set(ctx.files())
506 506 if len(parents) == 2:
507 507 mc = ctx.manifest()
508 508 mp1 = ctx.parents()[0].manifest()
509 509 mp2 = ctx.parents()[1].manifest()
510 510 for f in mp1:
511 511 if f not in mc:
512 512 files.add(f)
513 513 for f in mp2:
514 514 if f not in mc:
515 515 files.add(f)
516 516 for f in mc:
517 517 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
518 518 files.add(f)
519 519 for fn in files:
520 520 if isstandin(fn) and fn in ctx:
521 521 addfunc(fn, ctx[fn].data().strip())
522 522 repo.ui.progress(_('finding outgoing largefiles'), None)
523 523
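
For merges, the loop above treats a file as changed when it was removed relative to either parent or differs from either parent's version; a hypothetical sketch with plain dicts standing in for manifests:

    def merge_changed(mc, mp1, mp2):
        files = set(f for f in mp1 if f not in mc)        # removed vs p1
        files.update(f for f in mp2 if f not in mc)       # removed vs p2
        files.update(f for f in mc                        # differs from a parent
                     if mc[f] != mp1.get(f) or mc[f] != mp2.get(f))
        return files

    assert merge_changed({'a': 1}, {'a': 1, 'b': 2}, {'a': 3}) == {'a', 'b'}
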
524 524 def updatestandinsbymatch(repo, match):
525 525 '''Update standins in the working directory according to specified match
526 526
527 527 This returns (possibly modified) ``match`` object to be used for
528 528 subsequent commit process.
529 529 '''
530 530
531 531 ui = repo.ui
532 532
533 533 # Case 1: user calls commit with no specific files or
534 534 # include/exclude patterns: refresh and commit all files that
535 535 # are "dirty".
536 536 if match is None or match.always():
537 537 # Spend a bit of time here to get a list of files we know
538 538 # are modified so we can compare only against those.
539 539 # It can cost a lot of time (several seconds)
540 540 # otherwise to update all standins if the largefiles are
541 541 # large.
542 542 lfdirstate = openlfdirstate(ui, repo)
543 543 dirtymatch = matchmod.always(repo.root, repo.getcwd())
544 544 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
545 545 False)
546 546 modifiedfiles = unsure + s.modified + s.added + s.removed
547 547 lfiles = listlfiles(repo)
548 548 # this only loops through largefiles that exist (not
549 549 # removed/renamed)
550 550 for lfile in lfiles:
551 551 if lfile in modifiedfiles:
552 552 if repo.wvfs.exists(standin(lfile)):
553 553 # this handles the case where a rebase is being
554 554 # performed and the working copy is not updated
555 555 # yet.
556 556 if repo.wvfs.exists(lfile):
557 557 updatestandin(repo,
558 558 standin(lfile))
559 559
560 560 return match
561 561
562 562 lfiles = listlfiles(repo)
563 563 match._files = repo._subdirlfs(match.files(), lfiles)
564 564
565 565 # Case 2: user calls commit with specified patterns: refresh
566 566 # any matching big files.
567 567 smatcher = composestandinmatcher(repo, match)
568 568 standins = repo.dirstate.walk(smatcher, [], False, False)
569 569
570 570 # No matching big files: get out of the way and pass control to
571 571 # the usual commit() method.
572 572 if not standins:
573 573 return match
574 574
575 575 # Refresh all matching big files. It's possible that the
576 576 # commit will end up failing, in which case the big files will
577 577 # stay refreshed. No harm done: the user modified them and
578 578 # asked to commit them, so sooner or later we're going to
579 579 # refresh the standins. Might as well leave them refreshed.
580 580 lfdirstate = openlfdirstate(ui, repo)
581 581 for fstandin in standins:
582 582 lfile = splitstandin(fstandin)
583 583 if lfdirstate[lfile] != 'r':
584 584 updatestandin(repo, fstandin)
585 585
586 586 # Cook up a new matcher that only matches regular files or
587 587 # standins corresponding to the big files requested by the
588 588 # user. Have to modify _files to prevent commit() from
589 589 # complaining "not tracked" for big files.
590 590 match = copy.copy(match)
591 591 origmatchfn = match.matchfn
592 592
593 593 # Check both the list of largefiles and the list of
594 594 # standins because if a largefile was removed, it
595 595 # won't be in the list of largefiles at this point
596 596 match._files += sorted(standins)
597 597
598 598 actualfiles = []
599 599 for f in match._files:
600 600 fstandin = standin(f)
601 601
602 602 # For largefiles, only one of the normal and standin should be
603 603 # committed (except if one of them is a remove). In the case of a
604 604 # standin removal, drop the normal file if it is unknown to dirstate.
605 605 # Thus, skip plain largefile names but keep the standin.
606 606 if f in lfiles or fstandin in standins:
607 607 if repo.dirstate[fstandin] != 'r':
608 608 if repo.dirstate[f] != 'r':
609 609 continue
610 610 elif repo.dirstate[f] == '?':
611 611 continue
612 612
613 613 actualfiles.append(f)
614 614 match._files = actualfiles
615 615
616 616 def matchfn(f):
617 617 if origmatchfn(f):
618 618 return f not in lfiles
619 619 else:
620 620 return f in standins
621 621
622 622 match.matchfn = matchfn
623 623
624 624 return match
625 625
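
The cooked matcher at the end accepts what the original matcher accepted minus plain largefile names, plus their standins; a minimal sketch of that closure with illustrative data:

    lfiles = {'big.bin'}
    standins = {'.hglf/big.bin'}
    origmatchfn = lambda f: f in {'normal.txt', 'big.bin'}

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles  # suppress the plain largefile name
        return f in standins        # but accept its standin

    assert matchfn('normal.txt') and matchfn('.hglf/big.bin')
    assert not matchfn('big.bin')
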
626 626 class automatedcommithook(object):
627 627 '''Stateful hook to update standins at the 1st commit of resuming
628 628
629 629 For efficiency, updating standins in the working directory should
630 630 be avoided while automated committing (like rebase, transplant and
631 631 so on), because they should be updated before committing.
632 632
633 633 But the 1st commit of resuming automated committing (e.g. ``rebase
634 634 --continue``) should update them, because largefiles may be
635 635 modified manually.
636 636 '''
637 637 def __init__(self, resuming):
638 638 self.resuming = resuming
639 639
640 640 def __call__(self, repo, match):
641 641 if self.resuming:
642 642 self.resuming = False # avoids updating at subsequent commits
643 643 return updatestandinsbymatch(repo, match)
644 644 else:
645 645 return match
646 646
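
The hook is the classic one-shot flag pattern: do the expensive refresh on the first call after resuming, then become a pass-through (a self-contained sketch; the string result stands in for updatestandinsbymatch):

    class oneshothook(object):
        def __init__(self, resuming):
            self.resuming = resuming
        def __call__(self, match):
            if self.resuming:
                self.resuming = False         # subsequent calls skip the refresh
                return 'refreshed:' + match   # stands in for updatestandinsbymatch
            return match

    hook = oneshothook(True)
    assert hook('m1') == 'refreshed:m1'
    assert hook('m2') == 'm2'
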
647 647 def getstatuswriter(ui, repo, forcibly=None):
648 648 '''Return the function to write largefiles specific status out
649 649
650 650 If ``forcibly`` is ``None``, this returns the last element of
651 651 ``repo._lfstatuswriters`` as "default" writer function.
652 652
653 653 Otherwise, this returns the function to always write out (or
654 654 ignore if ``not forcibly``) status.
655 655 '''
656 656 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
657 657 return repo._lfstatuswriters[-1]
658 658 else:
659 659 if forcibly:
660 660 return ui.status # forcibly WRITE OUT
661 661 else:
662 662 return lambda *msg, **opts: None # forcibly IGNORE