##// END OF EJS Templates
largefiles: do not use platform.system()...
Jun Wu -
r34642:bb6544b1 default
parent child Browse files
Show More
@@ -1,674 +1,673 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import hashlib
14 14 import os
15 import platform
16 15 import stat
17 16
18 17 from mercurial.i18n import _
19 18
20 19 from mercurial import (
21 20 dirstate,
22 21 encoding,
23 22 error,
24 23 httpconnection,
25 24 match as matchmod,
26 25 node,
27 26 pycompat,
28 27 scmutil,
29 28 sparse,
30 29 util,
31 30 vfs as vfsmod,
32 31 )
33 32
34 33 shortname = '.hglf'
35 34 shortnameslash = shortname + '/'
36 35 longname = 'largefiles'
37 36
38 37 # -- Private worker functions ------------------------------------------
39 38
def getminsize(ui, assumelfiles, opt, default=10):
    """Return the minimum largefile size (in MB) as a float.

    The command line value ``opt`` wins; the ``largefiles.minsize``
    config entry is consulted only when ``assumelfiles`` is set.
    Raises Abort when the value is missing or not a number.
    """
    size = opt
    if assumelfiles and not size:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % size)
    if size is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return size
53 52
def link(src, dest):
    """Hardlink ``src`` to ``dest``, falling back to an atomic copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
        return
    except OSError:
        pass
    # hardlink failed (e.g. cross-device): fall back to an atomic copy,
    # preserving the source's permission bits
    with open(src, 'rb') as infile, util.atomictempfile(dest) as outfile:
        for block in util.filechunkiter(infile):
            outfile.write(block)
    os.chmod(dest, os.stat(src).st_mode)
65 64
def usercachepath(ui, hash):
    """Return the path for ``hash`` in the user ("global") largefiles cache.

    The user cache shares largefiles across repositories, saving both
    download bandwidth and disk space.
    """
    return os.path.join(_usercachedir(ui), hash)
72 71
def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.'''
    # explicit configuration always wins
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return configured
    if pycompat.osname == 'nt':
        appdata = encoding.environ.get(
            'LOCALAPPDATA', encoding.environ.get('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif pycompat.sysplatform == 'darwin':
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif pycompat.osname == 'posix':
        # honour the XDG base-directory spec, falling back to ~/.cache
        xdgcache = encoding.environ.get('XDG_CACHE_HOME')
        if xdgcache:
            return os.path.join(xdgcache, longname)
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n')
                          % pycompat.osname)
    raise error.Abort(_('unknown %s usercache location') % longname)
98 97
def inusercache(ui, hash):
    """Return True if the largefile ``hash`` is present in the user cache."""
    return os.path.exists(usercachepath(ui, hash))
102 101
def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    storedpath, found = findstorepath(repo, hash)
    if found:
        repo.ui.note(_('found %s in store\n') % hash)
        return storedpath
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        # materialize the file in the repo store from the user cache
        target = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), target)
        return target
    return None
117 116
class largefilesdirstate(dirstate.dirstate):
    """dirstate subclass used for the largefiles dirstate (lfdirstate).

    Every key is normalized to the slash-separated internal form via
    unixpath() before delegating to the base class, so callers may pass
    platform-native paths.
    """
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # nothing tracked by the lfdirstate is ever ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        # (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
140 139
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    If ``create`` is true and the dirstate file does not exist yet, it
    is seeded from the standins currently tracked by the repo dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate,
                                    lambda: sparse.matcher(repo))

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, subrepos=[], unknown=False,
                                      ignored=False)

        # only create the store directory when there is something to record
        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            # normallookup marks the file "possibly dirty" so the next
            # status re-checks it
            lfdirstate.normallookup(lfile)
    return lfdirstate
168 167
def lfdirstatestatus(lfdirstate, repo):
    """Return the status of largefiles against the '.' revision.

    Files reported as "unsure" by the lfdirstate are resolved by
    re-hashing the working copy and comparing against the standin;
    files that prove clean are re-marked normal in ``lfdirstate``.
    """
    pctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, subrepos=[], ignored=False,
                                  clean=False, unknown=False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            # no standin in '.': treat as modified below
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
186 185
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    ctx = repo[rev]
    lfiles = []
    for f in ctx.walk(matcher):
        # in the working directory (rev is None), skip unknown files
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
198 197
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    path = storepath(repo, hash, forcelocal)
    return os.path.exists(path)
202 201
def storepath(repo, hash, forcelocal=False):
    """Return the store location for the largefile with the given hash.

    For shared repositories the store lives in the share source unless
    ``forcelocal`` is set.
    """
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)
209 208
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    primary = storepath(repo, hash, False)
    if instore(repo, hash):
        return (primary, True)
    if repo.shared() and instore(repo, hash, True):
        return (storepath(repo, hash, True), True)
    return (primary, False)
226 225
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd, wvfs(filename, 'wb') as destfd:
        gothash = copyandhash(
            util.filechunkiter(srcfd), destfd)
    # verify content integrity; remove the corrupt copy so a later
    # attempt will refetch rather than reuse it
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True
249 248
def copytostore(repo, ctx, file, fstandin):
    """Copy largefile ``file`` (standin ``fstandin`` in ``ctx``) into the
    local store, unless it is already present."""
    wvfs = repo.wvfs
    expectedhash = readasstandin(ctx[fstandin])
    if instore(repo, expectedhash):
        return
    if not wvfs.exists(file):
        # nothing to copy: the working copy does not have the largefile
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, expectedhash))
        return
    copytostoreabsolute(repo, wvfs.join(file), expectedhash)
260 259
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        lfile = splitstandin(filename)
        if lfile is None:
            continue
        # only copy standins that actually exist in this revision
        if filename in ctx.manifest():
            copytostore(repo, ctx, lfile, filename)
269 268
def copytostoreabsolute(repo, file, hash):
    """Put ``file`` (an absolute path) into the store under ``hash``."""
    if inusercache(repo.ui, hash):
        # cheap path: hardlink/copy straight from the user cache
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        dst = storepath(repo, hash)
        util.makedirs(os.path.dirname(dst))
        with open(file, 'rb') as srcf:
            with util.atomictempfile(dst,
                                     createmode=repo.store.createmode) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
281 280
def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
287 286
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # rebase the user's patterns under the standin directory
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            # matcher has no file patterns: match the whole standin dir
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match
305 304
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    standinmatch = getstandinmatcher(repo, rmatcher)
    origmatchfn = standinmatch.matchfn
    def bothmatch(f):
        # f must be a standin AND its largefile must satisfy rmatcher
        return origmatchfn(f) and rmatcher.matchfn(splitstandin(f))
    standinmatch.matchfn = bothmatch
    return standinmatch
317 316
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Callers wanting an absolute path must repo.wjoin() the result;
    #    e.g. addlargefiles needs a repo-relative path for repo[None].add().
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows; pconvert first normalizes separators in names that may
    #    come from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
329 328
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)
334 333
def splitstandin(filename):
    """Return the largefile path for standin ``filename``, or None.

    Split on '/' because that's what dirstate always uses, even on
    Windows; pconvert first normalizes externally-supplied separators.
    """
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
344 343
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    if not repo.wvfs.exists(lfile):
        raise error.Abort(_('%s: file not found!') % lfile)
    absname = repo.wjoin(lfile)
    writestandin(repo, standin, hashfile(absname), getexecutable(absname))
357 356
def readasstandin(fctx):
    '''read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer.'''
    data = fctx.data()
    return data.strip()
363 362
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
367 366
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    # hashlib.sha1('') raises TypeError on Python 3 (str is not accepted);
    # an empty constructor is equivalent and works on both Python 2 and 3.
    hasher = hashlib.sha1()
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()
376 375
def hashfile(file):
    """Return the hex SHA-1 of ``file``'s content, or '' if it is missing."""
    if not os.path.exists(file):
        return ''
    with open(file, 'rb') as fileobj:
        return hexsha1(fileobj)
382 381
def getexecutable(filename):
    """Return truthy iff ``filename`` is executable by user, group and
    other (all three execute bits set)."""
    st_mode = os.stat(filename).st_mode
    return ((st_mode & stat.S_IXUSR) and
            (st_mode & stat.S_IXGRP) and
            (st_mode & stat.S_IXOTH))
388 387
def urljoin(first, second, *arg):
    """Join two or more URL fragments, ensuring exactly one '/' between
    adjacent fragments (at most one leading slash is stripped from the
    right-hand fragment)."""
    def _join(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    url = _join(first, second)
    for piece in arg:
        url = _join(url, piece)
    return url
401 400
def hexsha1(fileobj):
    """Return the hex-encoded SHA-1 digest of the data readable from the
    file-like object ``fileobj``."""
    digest = hashlib.sha1()
    for block in util.filechunkiter(fileobj):
        digest.update(block)
    return digest.hexdigest()
409 408
def httpsendfile(ui, filename):
    # thin wrapper: open filename read-binary as an httpsendfile object
    # suitable for uploading over HTTP
    return httpconnection.httpsendfile(ui, filename, 'rb')
412 411
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
416 415
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if 'largefiles' in repo.requirements:
        if any(shortnameslash in f[0] for f in repo.store.datafiles()):
            return True
    # fall back to checking whether the lfdirstate tracks anything
    return any(openlfdirstate(repo.ui, repo, False))
424 423
class storeprotonotcapable(Exception):
    '''Raised when no largefile store of a required type is available.

    ``storetypes`` lists the store types that would have been acceptable.
    '''
    def __init__(self, storetypes):
        self.storetypes = storetypes
428 427
def getstandinsstate(repo):
    """Return [(lfile, hash-or-None), ...] for every tracked standin."""
    results = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for fstandin in repo.dirstate.walk(matcher, subrepos=[], unknown=False,
                                       ignored=False):
        lfile = splitstandin(fstandin)
        try:
            expected = readasstandin(wctx[fstandin])
        except IOError:
            # standin unreadable (e.g. removed): record an unknown hash
            expected = None
        results.append((lfile, expected))
    return results
442 441
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    """Sync the lfdirstate entry for ``lfile`` with its standin's state
    in the repo dirstate.

    ``normallookup`` forces a 'possibly dirty' mark even for clean-looking
    entries.
    """
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        # standin untracked: treat the largefile as unknown
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        # merged standins need a fresh status check
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
465 464
def markcommitted(orig, ctx, node):
    """Post-commit hook wrapper: sync the lfdirstate for committed
    standins and populate the store/user cache."""
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files coming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)
494 493
def getlfilestoupdate(oldstandins, newstandins):
    """Return the names of files whose (name, hash) entry differs between
    the two standin-state lists (see getstandinsstate)."""
    changed = set(oldstandins) ^ set(newstandins)
    filelist = []
    for entry in changed:
        name = entry[0]
        if name not in filelist:
            filelist.append(name)
    return filelist
502 501
def getlfilestoupload(repo, missing, addfunc):
    """For every outgoing revision in ``missing``, call ``addfunc(fn, hash)``
    for each standin file present in that revision."""
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revisions'), total=len(missing))
        parents = [p for p in repo[n].parents() if p != node.nullid]

        # read the changectx with lfstatus disabled so ctx.files() reports
        # standins rather than largefile names
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            # merges: ctx.files() may omit files coming from either parent;
            # add everything that differs between the three manifests
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, readasstandin(ctx[fn]))
    # close out the progress indicator
    repo.ui.progress(_('finding outgoing largefiles'), None)
534 533
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, subrepos=[], ignored=False,
                                      clean=False, unknown=False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, subrepos=[], unknown=False,
                                  ignored=False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    # accept non-largefile files matched by the original matcher, plus
    # the standins of the big files refreshed above
    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
637 636
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        # first commit after resuming: refresh standins once, then
        # behave like a pass-through for subsequent commits
        self.resuming = False
        return updatestandinsbymatch(repo, match)
658 657
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE
General Comments 0
You need to be logged in to leave comments. Login now