largefiles: add lfile argument to updatestandin() for efficiency (API)...
FUJIWARA Katsunori
r31659:0eec3611 default
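The signature change is small but touches every caller: updatestandin() now receives the largefile name together with its standin, so it no longer has to re-derive the name via splitstandin() on each call. Below is a minimal sketch of the new calling convention, assuming a configured Mercurial repo object and the extension's lfutil module; the refresh_standins helper is hypothetical and only illustrates the pattern the callers in the diff follow.

from hgext.largefiles import lfutil

def refresh_standins(repo, standins):
    # Hypothetical helper: refresh every standin whose largefile still
    # exists in the working directory.
    for fstandin in standins:
        # Callers that walk standins already know the largefile name...
        lfile = lfutil.splitstandin(fstandin)   # '.hglf/big.bin' -> 'big.bin'
        if lfile is None or not repo.wvfs.exists(lfile):
            continue
        # ...so the new signature takes both and avoids a redundant split:
        #   old: lfutil.updatestandin(repo, fstandin)
        lfutil.updatestandin(repo, lfile, fstandin)   # new signature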
@@ -1,667 +1,670 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import hashlib
14 14 import os
15 15 import platform
16 16 import stat
17 17
18 18 from mercurial.i18n import _
19 19
20 20 from mercurial import (
21 21 dirstate,
22 22 encoding,
23 23 error,
24 24 httpconnection,
25 25 match as matchmod,
26 26 node,
27 27 pycompat,
28 28 scmutil,
29 29 util,
30 30 vfs as vfsmod,
31 31 )
32 32
33 33 shortname = '.hglf'
34 34 shortnameslash = shortname + '/'
35 35 longname = 'largefiles'
36 36
37 37 # -- Private worker functions ------------------------------------------
38 38
39 39 def getminsize(ui, assumelfiles, opt, default=10):
40 40 lfsize = opt
41 41 if not lfsize and assumelfiles:
42 42 lfsize = ui.config(longname, 'minsize', default=default)
43 43 if lfsize:
44 44 try:
45 45 lfsize = float(lfsize)
46 46 except ValueError:
47 47 raise error.Abort(_('largefiles: size must be number (not %s)\n')
48 48 % lfsize)
49 49 if lfsize is None:
50 50 raise error.Abort(_('minimum size for largefiles must be specified'))
51 51 return lfsize
52 52
53 53 def link(src, dest):
54 54 """Try to create hardlink - if that fails, efficiently make a copy."""
55 55 util.makedirs(os.path.dirname(dest))
56 56 try:
57 57 util.oslink(src, dest)
58 58 except OSError:
59 59 # if hardlinks fail, fallback on atomic copy
60 60 with open(src, 'rb') as srcf:
61 61 with util.atomictempfile(dest) as dstf:
62 62 for chunk in util.filechunkiter(srcf):
63 63 dstf.write(chunk)
64 64 os.chmod(dest, os.stat(src).st_mode)
65 65
66 66 def usercachepath(ui, hash):
67 67 '''Return the correct location in the "global" largefiles cache for a file
68 68 with the given hash.
69 69 This cache is used for sharing of largefiles across repositories - both
70 70 to preserve download bandwidth and storage space.'''
71 71 return os.path.join(_usercachedir(ui), hash)
72 72
73 73 def _usercachedir(ui):
74 74 '''Return the location of the "global" largefiles cache.'''
75 75 path = ui.configpath(longname, 'usercache', None)
76 76 if path:
77 77 return path
78 78 if pycompat.osname == 'nt':
79 79 appdata = encoding.environ.get('LOCALAPPDATA',\
80 80 encoding.environ.get('APPDATA'))
81 81 if appdata:
82 82 return os.path.join(appdata, longname)
83 83 elif platform.system() == 'Darwin':
84 84 home = encoding.environ.get('HOME')
85 85 if home:
86 86 return os.path.join(home, 'Library', 'Caches', longname)
87 87 elif pycompat.osname == 'posix':
88 88 path = encoding.environ.get('XDG_CACHE_HOME')
89 89 if path:
90 90 return os.path.join(path, longname)
91 91 home = encoding.environ.get('HOME')
92 92 if home:
93 93 return os.path.join(home, '.cache', longname)
94 94 else:
95 95 raise error.Abort(_('unknown operating system: %s\n')
96 96 % pycompat.osname)
97 97 raise error.Abort(_('unknown %s usercache location') % longname)
98 98
99 99 def inusercache(ui, hash):
100 100 path = usercachepath(ui, hash)
101 101 return os.path.exists(path)
102 102
103 103 def findfile(repo, hash):
104 104 '''Return store path of the largefile with the specified hash.
105 105 As a side effect, the file might be linked from user cache.
106 106 Return None if the file can't be found locally.'''
107 107 path, exists = findstorepath(repo, hash)
108 108 if exists:
109 109 repo.ui.note(_('found %s in store\n') % hash)
110 110 return path
111 111 elif inusercache(repo.ui, hash):
112 112 repo.ui.note(_('found %s in system cache\n') % hash)
113 113 path = storepath(repo, hash)
114 114 link(usercachepath(repo.ui, hash), path)
115 115 return path
116 116 return None
117 117
118 118 class largefilesdirstate(dirstate.dirstate):
119 119 def __getitem__(self, key):
120 120 return super(largefilesdirstate, self).__getitem__(unixpath(key))
121 121 def normal(self, f):
122 122 return super(largefilesdirstate, self).normal(unixpath(f))
123 123 def remove(self, f):
124 124 return super(largefilesdirstate, self).remove(unixpath(f))
125 125 def add(self, f):
126 126 return super(largefilesdirstate, self).add(unixpath(f))
127 127 def drop(self, f):
128 128 return super(largefilesdirstate, self).drop(unixpath(f))
129 129 def forget(self, f):
130 130 return super(largefilesdirstate, self).forget(unixpath(f))
131 131 def normallookup(self, f):
132 132 return super(largefilesdirstate, self).normallookup(unixpath(f))
133 133 def _ignore(self, f):
134 134 return False
135 135 def write(self, tr=False):
136 136 # (1) disable PENDING mode always
137 137 # (lfdirstate isn't yet managed as a part of the transaction)
138 138 # (2) avoid develwarn 'use dirstate.write with ....'
139 139 super(largefilesdirstate, self).write(None)
140 140
141 141 def openlfdirstate(ui, repo, create=True):
142 142 '''
143 143 Return a dirstate object that tracks largefiles: i.e. its root is
144 144 the repo root, but it is saved in .hg/largefiles/dirstate.
145 145 '''
146 146 vfs = repo.vfs
147 147 lfstoredir = longname
148 148 opener = vfsmod.vfs(vfs.join(lfstoredir))
149 149 lfdirstate = largefilesdirstate(opener, ui, repo.root,
150 150 repo.dirstate._validate)
151 151
152 152 # If the largefiles dirstate does not exist, populate and create
153 153 # it. This ensures that we create it on the first meaningful
154 154 # largefiles operation in a new clone.
155 155 if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
156 156 matcher = getstandinmatcher(repo)
157 157 standins = repo.dirstate.walk(matcher, [], False, False)
158 158
159 159 if len(standins) > 0:
160 160 vfs.makedirs(lfstoredir)
161 161
162 162 for standin in standins:
163 163 lfile = splitstandin(standin)
164 164 lfdirstate.normallookup(lfile)
165 165 return lfdirstate
166 166
167 167 def lfdirstatestatus(lfdirstate, repo):
168 168 pctx = repo['.']
169 169 match = matchmod.always(repo.root, repo.getcwd())
170 170 unsure, s = lfdirstate.status(match, [], False, False, False)
171 171 modified, clean = s.modified, s.clean
172 172 for lfile in unsure:
173 173 try:
174 174 fctx = pctx[standin(lfile)]
175 175 except LookupError:
176 176 fctx = None
177 177 if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
178 178 modified.append(lfile)
179 179 else:
180 180 clean.append(lfile)
181 181 lfdirstate.normal(lfile)
182 182 return s
183 183
184 184 def listlfiles(repo, rev=None, matcher=None):
185 185 '''return a list of largefiles in the working copy or the
186 186 specified changeset'''
187 187
188 188 if matcher is None:
189 189 matcher = getstandinmatcher(repo)
190 190
191 191 # ignore unknown files in working directory
192 192 return [splitstandin(f)
193 193 for f in repo[rev].walk(matcher)
194 194 if rev is not None or repo.dirstate[f] != '?']
195 195
196 196 def instore(repo, hash, forcelocal=False):
197 197 '''Return true if a largefile with the given hash exists in the store'''
198 198 return os.path.exists(storepath(repo, hash, forcelocal))
199 199
200 200 def storepath(repo, hash, forcelocal=False):
201 201 '''Return the correct location in the repository largefiles store for a
202 202 file with the given hash.'''
203 203 if not forcelocal and repo.shared():
204 204 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
205 205 return repo.vfs.join(longname, hash)
206 206
207 207 def findstorepath(repo, hash):
208 208 '''Search through the local store path(s) to find the file for the given
209 209 hash. If the file is not found, its path in the primary store is returned.
210 210 The return value is a tuple of (path, exists(path)).
211 211 '''
212 212 # For shared repos, the primary store is in the share source. But for
213 213 # backward compatibility, force a lookup in the local store if it wasn't
214 214 # found in the share source.
215 215 path = storepath(repo, hash, False)
216 216
217 217 if instore(repo, hash):
218 218 return (path, True)
219 219 elif repo.shared() and instore(repo, hash, True):
220 220 return storepath(repo, hash, True), True
221 221
222 222 return (path, False)
223 223
224 224 def copyfromcache(repo, hash, filename):
225 225 '''Copy the specified largefile from the repo or system cache to
226 226 filename in the repository. Return true on success or false if the
227 227     file was not found in either cache (which should not happen:
228 228 this is meant to be called only after ensuring that the needed
229 229 largefile exists in the cache).'''
230 230 wvfs = repo.wvfs
231 231 path = findfile(repo, hash)
232 232 if path is None:
233 233 return False
234 234 wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
235 235 # The write may fail before the file is fully written, but we
236 236 # don't use atomic writes in the working copy.
237 237 with open(path, 'rb') as srcfd:
238 238 with wvfs(filename, 'wb') as destfd:
239 239 gothash = copyandhash(
240 240 util.filechunkiter(srcfd), destfd)
241 241 if gothash != hash:
242 242 repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
243 243 % (filename, path, gothash))
244 244 wvfs.unlink(filename)
245 245 return False
246 246 return True
247 247
248 248 def copytostore(repo, revorctx, file, uploaded=False):
249 249 wvfs = repo.wvfs
250 250 hash = readstandin(repo, file, revorctx)
251 251 if instore(repo, hash):
252 252 return
253 253 if wvfs.exists(file):
254 254 copytostoreabsolute(repo, wvfs.join(file), hash)
255 255 else:
256 256 repo.ui.warn(_("%s: largefile %s not available from local store\n") %
257 257 (file, hash))
258 258
259 259 def copyalltostore(repo, node):
260 260 '''Copy all largefiles in a given revision to the store'''
261 261
262 262 ctx = repo[node]
263 263 for filename in ctx.files():
264 264 realfile = splitstandin(filename)
265 265 if realfile is not None and filename in ctx.manifest():
266 266 copytostore(repo, ctx, realfile)
267 267
268 268 def copytostoreabsolute(repo, file, hash):
269 269 if inusercache(repo.ui, hash):
270 270 link(usercachepath(repo.ui, hash), storepath(repo, hash))
271 271 else:
272 272 util.makedirs(os.path.dirname(storepath(repo, hash)))
273 273 with open(file, 'rb') as srcf:
274 274 with util.atomictempfile(storepath(repo, hash),
275 275 createmode=repo.store.createmode) as dstf:
276 276 for chunk in util.filechunkiter(srcf):
277 277 dstf.write(chunk)
278 278 linktousercache(repo, hash)
279 279
280 280 def linktousercache(repo, hash):
281 281 '''Link / copy the largefile with the specified hash from the store
282 282 to the cache.'''
283 283 path = usercachepath(repo.ui, hash)
284 284 link(storepath(repo, hash), path)
285 285
286 286 def getstandinmatcher(repo, rmatcher=None):
287 287 '''Return a match object that applies rmatcher to the standin directory'''
288 288 wvfs = repo.wvfs
289 289 standindir = shortname
290 290
291 291 # no warnings about missing files or directories
292 292 badfn = lambda f, msg: None
293 293
294 294 if rmatcher and not rmatcher.always():
295 295 pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
296 296 if not pats:
297 297 pats = [wvfs.join(standindir)]
298 298 match = scmutil.match(repo[None], pats, badfn=badfn)
299 299 # if pats is empty, it would incorrectly always match, so clear _always
300 300 match._always = False
301 301 else:
302 302 # no patterns: relative to repo root
303 303 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
304 304 return match
305 305
306 306 def composestandinmatcher(repo, rmatcher):
307 307 '''Return a matcher that accepts standins corresponding to the
308 308 files accepted by rmatcher. Pass the list of files in the matcher
309 309 as the paths specified by the user.'''
310 310 smatcher = getstandinmatcher(repo, rmatcher)
311 311 isstandin = smatcher.matchfn
312 312 def composedmatchfn(f):
313 313 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
314 314 smatcher.matchfn = composedmatchfn
315 315
316 316 return smatcher
317 317
318 318 def standin(filename):
319 319 '''Return the repo-relative path to the standin for the specified big
320 320 file.'''
321 321 # Notes:
322 322 # 1) Some callers want an absolute path, but for instance addlargefiles
323 323 # needs it repo-relative so it can be passed to repo[None].add(). So
324 324 # leave it up to the caller to use repo.wjoin() to get an absolute path.
325 325 # 2) Join with '/' because that's what dirstate always uses, even on
326 326 # Windows. Change existing separator to '/' first in case we are
327 327 # passed filenames from an external source (like the command line).
328 328 return shortnameslash + util.pconvert(filename)
329 329
330 330 def isstandin(filename):
331 331 '''Return true if filename is a big file standin. filename must be
332 332 in Mercurial's internal form (slash-separated).'''
333 333 return filename.startswith(shortnameslash)
334 334
335 335 def splitstandin(filename):
336 336 # Split on / because that's what dirstate always uses, even on Windows.
337 337 # Change local separator to / first just in case we are passed filenames
338 338 # from an external source (like the command line).
339 339 bits = util.pconvert(filename).split('/', 1)
340 340 if len(bits) == 2 and bits[0] == shortname:
341 341 return bits[1]
342 342 else:
343 343 return None
344 344
345 def updatestandin(repo, standin):
346 lfile = splitstandin(standin)
345 def updatestandin(repo, lfile, standin):
346 """Re-calculate hash value of lfile and write it into standin
347
348 This assumes that "lfutil.standin(lfile) == standin", for efficiency.
349 """
347 350 file = repo.wjoin(lfile)
348 351 if repo.wvfs.exists(lfile):
349 352 hash = hashfile(file)
350 353 executable = getexecutable(file)
351 354 writestandin(repo, standin, hash, executable)
352 355 else:
353 356 raise error.Abort(_('%s: file not found!') % lfile)
354 357
355 358 def readstandin(repo, filename, node=None):
356 359 '''read hex hash from standin for filename at given node, or working
357 360 directory if no node is given'''
358 361 return repo[node][standin(filename)].data().strip()
359 362
360 363 def writestandin(repo, standin, hash, executable):
361 364 '''write hash to <repo.root>/<standin>'''
362 365 repo.wwrite(standin, hash + '\n', executable and 'x' or '')
363 366
364 367 def copyandhash(instream, outfile):
365 368 '''Read bytes from instream (iterable) and write them to outfile,
366 369 computing the SHA-1 hash of the data along the way. Return the hash.'''
367 370 hasher = hashlib.sha1('')
368 371 for data in instream:
369 372 hasher.update(data)
370 373 outfile.write(data)
371 374 return hasher.hexdigest()
372 375
373 376 def hashfile(file):
374 377 if not os.path.exists(file):
375 378 return ''
376 379 with open(file, 'rb') as fd:
377 380 return hexsha1(fd)
378 381
379 382 def getexecutable(filename):
380 383 mode = os.stat(filename).st_mode
381 384 return ((mode & stat.S_IXUSR) and
382 385 (mode & stat.S_IXGRP) and
383 386 (mode & stat.S_IXOTH))
384 387
385 388 def urljoin(first, second, *arg):
386 389 def join(left, right):
387 390 if not left.endswith('/'):
388 391 left += '/'
389 392 if right.startswith('/'):
390 393 right = right[1:]
391 394 return left + right
392 395
393 396 url = join(first, second)
394 397 for a in arg:
395 398 url = join(url, a)
396 399 return url
397 400
398 401 def hexsha1(fileobj):
399 402 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
400 403 object data"""
401 404 h = hashlib.sha1()
402 405 for chunk in util.filechunkiter(fileobj):
403 406 h.update(chunk)
404 407 return h.hexdigest()
405 408
406 409 def httpsendfile(ui, filename):
407 410 return httpconnection.httpsendfile(ui, filename, 'rb')
408 411
409 412 def unixpath(path):
410 413 '''Return a version of path normalized for use with the lfdirstate.'''
411 414 return util.pconvert(os.path.normpath(path))
412 415
413 416 def islfilesrepo(repo):
414 417 '''Return true if the repo is a largefile repo.'''
415 418 if ('largefiles' in repo.requirements and
416 419 any(shortnameslash in f[0] for f in repo.store.datafiles())):
417 420 return True
418 421
419 422 return any(openlfdirstate(repo.ui, repo, False))
420 423
421 424 class storeprotonotcapable(Exception):
422 425 def __init__(self, storetypes):
423 426 self.storetypes = storetypes
424 427
425 428 def getstandinsstate(repo):
426 429 standins = []
427 430 matcher = getstandinmatcher(repo)
428 431 for standin in repo.dirstate.walk(matcher, [], False, False):
429 432 lfile = splitstandin(standin)
430 433 try:
431 434 hash = readstandin(repo, lfile)
432 435 except IOError:
433 436 hash = None
434 437 standins.append((lfile, hash))
435 438 return standins
436 439
437 440 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
438 441 lfstandin = standin(lfile)
439 442 if lfstandin in repo.dirstate:
440 443 stat = repo.dirstate._map[lfstandin]
441 444 state, mtime = stat[0], stat[3]
442 445 else:
443 446 state, mtime = '?', -1
444 447 if state == 'n':
445 448 if (normallookup or mtime < 0 or
446 449 not repo.wvfs.exists(lfile)):
447 450 # state 'n' doesn't ensure 'clean' in this case
448 451 lfdirstate.normallookup(lfile)
449 452 else:
450 453 lfdirstate.normal(lfile)
451 454 elif state == 'm':
452 455 lfdirstate.normallookup(lfile)
453 456 elif state == 'r':
454 457 lfdirstate.remove(lfile)
455 458 elif state == 'a':
456 459 lfdirstate.add(lfile)
457 460 elif state == '?':
458 461 lfdirstate.drop(lfile)
459 462
460 463 def markcommitted(orig, ctx, node):
461 464 repo = ctx.repo()
462 465
463 466 orig(node)
464 467
465 468 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
466 469 # because files coming from the 2nd parent are omitted in the latter.
467 470 #
468 471 # The former should be used to get targets of "synclfdirstate",
469 472 # because such files:
470 473 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
471 474 # - have to be marked as "n" after commit, but
472 475 # - aren't listed in "repo[node].files()"
473 476
474 477 lfdirstate = openlfdirstate(repo.ui, repo)
475 478 for f in ctx.files():
476 479 lfile = splitstandin(f)
477 480 if lfile is not None:
478 481 synclfdirstate(repo, lfdirstate, lfile, False)
479 482 lfdirstate.write()
480 483
481 484 # As part of committing, copy all of the largefiles into the cache.
482 485 #
483 486 # Using "node" instead of "ctx" implies additional "repo[node]"
484 487 # lookup while copyalltostore(), but can omit redundant check for
485 488 # files comming from the 2nd parent, which should exist in store
486 489 # at merging.
487 490 copyalltostore(repo, node)
488 491
489 492 def getlfilestoupdate(oldstandins, newstandins):
490 493 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
491 494 filelist = []
492 495 for f in changedstandins:
493 496 if f[0] not in filelist:
494 497 filelist.append(f[0])
495 498 return filelist
496 499
497 500 def getlfilestoupload(repo, missing, addfunc):
498 501 for i, n in enumerate(missing):
499 502 repo.ui.progress(_('finding outgoing largefiles'), i,
500 503 unit=_('revisions'), total=len(missing))
501 504 parents = [p for p in repo[n].parents() if p != node.nullid]
502 505
503 506 oldlfstatus = repo.lfstatus
504 507 repo.lfstatus = False
505 508 try:
506 509 ctx = repo[n]
507 510 finally:
508 511 repo.lfstatus = oldlfstatus
509 512
510 513 files = set(ctx.files())
511 514 if len(parents) == 2:
512 515 mc = ctx.manifest()
513 516 mp1 = ctx.parents()[0].manifest()
514 517 mp2 = ctx.parents()[1].manifest()
515 518 for f in mp1:
516 519 if f not in mc:
517 520 files.add(f)
518 521 for f in mp2:
519 522 if f not in mc:
520 523 files.add(f)
521 524 for f in mc:
522 525 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
523 526 files.add(f)
524 527 for fn in files:
525 528 if isstandin(fn) and fn in ctx:
526 529 addfunc(fn, ctx[fn].data().strip())
527 530 repo.ui.progress(_('finding outgoing largefiles'), None)
528 531
529 532 def updatestandinsbymatch(repo, match):
530 533 '''Update standins in the working directory according to specified match
531 534
532 535 This returns (possibly modified) ``match`` object to be used for
533 536 subsequent commit process.
534 537 '''
535 538
536 539 ui = repo.ui
537 540
538 541 # Case 1: user calls commit with no specific files or
539 542 # include/exclude patterns: refresh and commit all files that
540 543 # are "dirty".
541 544 if match is None or match.always():
542 545 # Spend a bit of time here to get a list of files we know
543 546 # are modified so we can compare only against those.
544 547 # It can cost a lot of time (several seconds)
545 548 # otherwise to update all standins if the largefiles are
546 549 # large.
547 550 lfdirstate = openlfdirstate(ui, repo)
548 551 dirtymatch = matchmod.always(repo.root, repo.getcwd())
549 552 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
550 553 False)
551 554 modifiedfiles = unsure + s.modified + s.added + s.removed
552 555 lfiles = listlfiles(repo)
553 556 # this only loops through largefiles that exist (not
554 557 # removed/renamed)
555 558 for lfile in lfiles:
556 559 if lfile in modifiedfiles:
557 560 fstandin = standin(lfile)
558 561 if repo.wvfs.exists(fstandin):
559 562 # this handles the case where a rebase is being
560 563 # performed and the working copy is not updated
561 564 # yet.
562 565 if repo.wvfs.exists(lfile):
563 updatestandin(repo, fstandin)
566 updatestandin(repo, lfile, fstandin)
564 567
565 568 return match
566 569
567 570 lfiles = listlfiles(repo)
568 571 match._files = repo._subdirlfs(match.files(), lfiles)
569 572
570 573 # Case 2: user calls commit with specified patterns: refresh
571 574 # any matching big files.
572 575 smatcher = composestandinmatcher(repo, match)
573 576 standins = repo.dirstate.walk(smatcher, [], False, False)
574 577
575 578 # No matching big files: get out of the way and pass control to
576 579 # the usual commit() method.
577 580 if not standins:
578 581 return match
579 582
580 583 # Refresh all matching big files. It's possible that the
581 584 # commit will end up failing, in which case the big files will
582 585 # stay refreshed. No harm done: the user modified them and
583 586 # asked to commit them, so sooner or later we're going to
584 587 # refresh the standins. Might as well leave them refreshed.
585 588 lfdirstate = openlfdirstate(ui, repo)
586 589 for fstandin in standins:
587 590 lfile = splitstandin(fstandin)
588 591 if lfdirstate[lfile] != 'r':
589 updatestandin(repo, fstandin)
592 updatestandin(repo, lfile, fstandin)
590 593
591 594 # Cook up a new matcher that only matches regular files or
592 595 # standins corresponding to the big files requested by the
593 596 # user. Have to modify _files to prevent commit() from
594 597 # complaining "not tracked" for big files.
595 598 match = copy.copy(match)
596 599 origmatchfn = match.matchfn
597 600
598 601 # Check both the list of largefiles and the list of
599 602 # standins because if a largefile was removed, it
600 603 # won't be in the list of largefiles at this point
601 604 match._files += sorted(standins)
602 605
603 606 actualfiles = []
604 607 for f in match._files:
605 608 fstandin = standin(f)
606 609
607 610 # For largefiles, only one of the normal and standin should be
608 611 # committed (except if one of them is a remove). In the case of a
609 612 # standin removal, drop the normal file if it is unknown to dirstate.
610 613 # Thus, skip plain largefile names but keep the standin.
611 614 if f in lfiles or fstandin in standins:
612 615 if repo.dirstate[fstandin] != 'r':
613 616 if repo.dirstate[f] != 'r':
614 617 continue
615 618 elif repo.dirstate[f] == '?':
616 619 continue
617 620
618 621 actualfiles.append(f)
619 622 match._files = actualfiles
620 623
621 624 def matchfn(f):
622 625 if origmatchfn(f):
623 626 return f not in lfiles
624 627 else:
625 628 return f in standins
626 629
627 630 match.matchfn = matchfn
628 631
629 632 return match
630 633
631 634 class automatedcommithook(object):
632 635 '''Stateful hook to update standins at the 1st commit of resuming
633 636
634 637     For efficiency, updating standins in the working directory should
635 638     be avoided during automated committing (like rebase, transplant and
636 639     so on), because they should already have been updated before committing.
637 640
638 641 But the 1st commit of resuming automated committing (e.g. ``rebase
639 642 --continue``) should update them, because largefiles may be
640 643 modified manually.
641 644 '''
642 645 def __init__(self, resuming):
643 646 self.resuming = resuming
644 647
645 648 def __call__(self, repo, match):
646 649 if self.resuming:
647 650 self.resuming = False # avoids updating at subsequent commits
648 651 return updatestandinsbymatch(repo, match)
649 652 else:
650 653 return match
651 654
652 655 def getstatuswriter(ui, repo, forcibly=None):
653 656 '''Return the function to write largefiles specific status out
654 657
655 658 If ``forcibly`` is ``None``, this returns the last element of
656 659 ``repo._lfstatuswriters`` as "default" writer function.
657 660
658 661 Otherwise, this returns the function to always write out (or
659 662 ignore if ``not forcibly``) status.
660 663 '''
661 664 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
662 665 return repo._lfstatuswriters[-1]
663 666 else:
664 667 if forcibly:
665 668 return ui.status # forcibly WRITE OUT
666 669 else:
667 670 return lambda *msg, **opts: None # forcibly IGNORE
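The updated updatestandin() trusts that the standin it is handed really is standin(lfile); the new docstring above states this assumption explicitly. The following self-contained sketch reimplements the two path helpers from the hunk above in plain Python, purely to illustrate the round-trip invariant the new API relies on (the paths are made up for the example).

shortname = '.hglf'

def standin(filename):
    # Join with '/' because that is what dirstate always uses, even on Windows.
    return shortname + '/' + filename.replace('\\', '/')

def splitstandin(filename):
    bits = filename.replace('\\', '/').split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None

# The invariant that callers of updatestandin(repo, lfile, standin) preserve:
assert splitstandin(standin('data/big.bin')) == 'data/big.bin'
assert standin(splitstandin('.hglf/data/big.bin')) == '.hglf/data/big.bin'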
@@ -1,1460 +1,1460 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import os
14 14
15 15 from mercurial.i18n import _
16 16
17 17 from mercurial import (
18 18 archival,
19 19 cmdutil,
20 20 error,
21 21 hg,
22 22 match as matchmod,
23 23 pathutil,
24 24 registrar,
25 25 scmutil,
26 26 smartset,
27 27 util,
28 28 )
29 29
30 30 from . import (
31 31 lfcommands,
32 32 lfutil,
33 33 storefactory,
34 34 )
35 35
36 36 # -- Utility functions: commonly/repeatedly needed functionality ---------------
37 37
38 38 def composelargefilematcher(match, manifest):
39 39 '''create a matcher that matches only the largefiles in the original
40 40 matcher'''
41 41 m = copy.copy(match)
42 42 lfile = lambda f: lfutil.standin(f) in manifest
43 43 m._files = filter(lfile, m._files)
44 44 m._fileroots = set(m._files)
45 45 m._always = False
46 46 origmatchfn = m.matchfn
47 47 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
48 48 return m
49 49
50 50 def composenormalfilematcher(match, manifest, exclude=None):
51 51 excluded = set()
52 52 if exclude is not None:
53 53 excluded.update(exclude)
54 54
55 55 m = copy.copy(match)
56 56 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
57 57 manifest or f in excluded)
58 58 m._files = filter(notlfile, m._files)
59 59 m._fileroots = set(m._files)
60 60 m._always = False
61 61 origmatchfn = m.matchfn
62 62 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
63 63 return m
64 64
65 65 def installnormalfilesmatchfn(manifest):
66 66 '''installmatchfn with a matchfn that ignores all largefiles'''
67 67 def overridematch(ctx, pats=(), opts=None, globbed=False,
68 68 default='relpath', badfn=None):
69 69 if opts is None:
70 70 opts = {}
71 71 match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
72 72 return composenormalfilematcher(match, manifest)
73 73 oldmatch = installmatchfn(overridematch)
74 74
75 75 def installmatchfn(f):
76 76 '''monkey patch the scmutil module with a custom match function.
77 77     Warning: this monkey patches the _module_ at runtime! Not thread safe!'''
78 78 oldmatch = scmutil.match
79 79 setattr(f, 'oldmatch', oldmatch)
80 80 scmutil.match = f
81 81 return oldmatch
82 82
83 83 def restorematchfn():
84 84 '''restores scmutil.match to what it was before installmatchfn
85 85     was called. No-op if scmutil.match is its original function.
86 86
87 87 Note that n calls to installmatchfn will require n calls to
88 88 restore the original matchfn.'''
89 89 scmutil.match = getattr(scmutil.match, 'oldmatch')
90 90
91 91 def installmatchandpatsfn(f):
92 92 oldmatchandpats = scmutil.matchandpats
93 93 setattr(f, 'oldmatchandpats', oldmatchandpats)
94 94 scmutil.matchandpats = f
95 95 return oldmatchandpats
96 96
97 97 def restorematchandpatsfn():
98 98 '''restores scmutil.matchandpats to what it was before
99 99 installmatchandpatsfn was called. No-op if scmutil.matchandpats
100 100 is its original function.
101 101
102 102 Note that n calls to installmatchandpatsfn will require n calls
103 103 to restore the original matchfn.'''
104 104 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
105 105 scmutil.matchandpats)
106 106
107 107 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
108 108 large = opts.get('large')
109 109 lfsize = lfutil.getminsize(
110 110 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
111 111
112 112 lfmatcher = None
113 113 if lfutil.islfilesrepo(repo):
114 114 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
115 115 if lfpats:
116 116 lfmatcher = matchmod.match(repo.root, '', list(lfpats))
117 117
118 118 lfnames = []
119 119 m = matcher
120 120
121 121 wctx = repo[None]
122 122 for f in repo.walk(matchmod.badmatch(m, lambda x, y: None)):
123 123 exact = m.exact(f)
124 124 lfile = lfutil.standin(f) in wctx
125 125 nfile = f in wctx
126 126 exists = lfile or nfile
127 127
128 128 # addremove in core gets fancy with the name, add doesn't
129 129 if isaddremove:
130 130 name = m.uipath(f)
131 131 else:
132 132 name = m.rel(f)
133 133
134 134 # Don't warn the user when they attempt to add a normal tracked file.
135 135 # The normal add code will do that for us.
136 136 if exact and exists:
137 137 if lfile:
138 138 ui.warn(_('%s already a largefile\n') % name)
139 139 continue
140 140
141 141 if (exact or not exists) and not lfutil.isstandin(f):
142 142 # In case the file was removed previously, but not committed
143 143 # (issue3507)
144 144 if not repo.wvfs.exists(f):
145 145 continue
146 146
147 147 abovemin = (lfsize and
148 148 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
149 149 if large or abovemin or (lfmatcher and lfmatcher(f)):
150 150 lfnames.append(f)
151 151 if ui.verbose or not exact:
152 152 ui.status(_('adding %s as a largefile\n') % name)
153 153
154 154 bad = []
155 155
156 156 # Need to lock, otherwise there could be a race condition between
157 157 # when standins are created and added to the repo.
158 158 with repo.wlock():
159 159 if not opts.get('dry_run'):
160 160 standins = []
161 161 lfdirstate = lfutil.openlfdirstate(ui, repo)
162 162 for f in lfnames:
163 163 standinname = lfutil.standin(f)
164 164 lfutil.writestandin(repo, standinname, hash='',
165 165 executable=lfutil.getexecutable(repo.wjoin(f)))
166 166 standins.append(standinname)
167 167 if lfdirstate[f] == 'r':
168 168 lfdirstate.normallookup(f)
169 169 else:
170 170 lfdirstate.add(f)
171 171 lfdirstate.write()
172 172 bad += [lfutil.splitstandin(f)
173 173 for f in repo[None].add(standins)
174 174 if f in m.files()]
175 175
176 176 added = [f for f in lfnames if f not in bad]
177 177 return added, bad
178 178
179 179 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
180 180 after = opts.get('after')
181 181 m = composelargefilematcher(matcher, repo[None].manifest())
182 182 try:
183 183 repo.lfstatus = True
184 184 s = repo.status(match=m, clean=not isaddremove)
185 185 finally:
186 186 repo.lfstatus = False
187 187 manifest = repo[None].manifest()
188 188 modified, added, deleted, clean = [[f for f in list
189 189 if lfutil.standin(f) in manifest]
190 190 for list in (s.modified, s.added,
191 191 s.deleted, s.clean)]
192 192
193 193 def warn(files, msg):
194 194 for f in files:
195 195 ui.warn(msg % m.rel(f))
196 196 return int(len(files) > 0)
197 197
198 198 result = 0
199 199
200 200 if after:
201 201 remove = deleted
202 202 result = warn(modified + added + clean,
203 203 _('not removing %s: file still exists\n'))
204 204 else:
205 205 remove = deleted + clean
206 206 result = warn(modified, _('not removing %s: file is modified (use -f'
207 207 ' to force removal)\n'))
208 208 result = warn(added, _('not removing %s: file has been marked for add'
209 209 ' (use forget to undo)\n')) or result
210 210
211 211 # Need to lock because standin files are deleted then removed from the
212 212 # repository and we could race in-between.
213 213 with repo.wlock():
214 214 lfdirstate = lfutil.openlfdirstate(ui, repo)
215 215 for f in sorted(remove):
216 216 if ui.verbose or not m.exact(f):
217 217 # addremove in core gets fancy with the name, remove doesn't
218 218 if isaddremove:
219 219 name = m.uipath(f)
220 220 else:
221 221 name = m.rel(f)
222 222 ui.status(_('removing %s\n') % name)
223 223
224 224 if not opts.get('dry_run'):
225 225 if not after:
226 226 repo.wvfs.unlinkpath(f, ignoremissing=True)
227 227
228 228 if opts.get('dry_run'):
229 229 return result
230 230
231 231 remove = [lfutil.standin(f) for f in remove]
232 232 # If this is being called by addremove, let the original addremove
233 233 # function handle this.
234 234 if not isaddremove:
235 235 for f in remove:
236 236 repo.wvfs.unlinkpath(f, ignoremissing=True)
237 237 repo[None].forget(remove)
238 238
239 239 for f in remove:
240 240 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
241 241 False)
242 242
243 243 lfdirstate.write()
244 244
245 245 return result
246 246
247 247 # For overriding mercurial.hgweb.webcommands so that largefiles will
248 248 # appear at their right place in the manifests.
249 249 def decodepath(orig, path):
250 250 return lfutil.splitstandin(path) or path
251 251
252 252 # -- Wrappers: modify existing commands --------------------------------
253 253
254 254 def overrideadd(orig, ui, repo, *pats, **opts):
255 255 if opts.get('normal') and opts.get('large'):
256 256 raise error.Abort(_('--normal cannot be used with --large'))
257 257 return orig(ui, repo, *pats, **opts)
258 258
259 259 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
260 260 # The --normal flag short circuits this override
261 261 if opts.get('normal'):
262 262 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
263 263
264 264 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
265 265 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
266 266 ladded)
267 267 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
268 268
269 269 bad.extend(f for f in lbad)
270 270 return bad
271 271
272 272 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
273 273 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
274 274 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
275 275 return removelargefiles(ui, repo, False, matcher, after=after,
276 276 force=force) or result
277 277
278 278 def overridestatusfn(orig, repo, rev2, **opts):
279 279 try:
280 280 repo._repo.lfstatus = True
281 281 return orig(repo, rev2, **opts)
282 282 finally:
283 283 repo._repo.lfstatus = False
284 284
285 285 def overridestatus(orig, ui, repo, *pats, **opts):
286 286 try:
287 287 repo.lfstatus = True
288 288 return orig(ui, repo, *pats, **opts)
289 289 finally:
290 290 repo.lfstatus = False
291 291
292 292 def overridedirty(orig, repo, ignoreupdate=False):
293 293 try:
294 294 repo._repo.lfstatus = True
295 295 return orig(repo, ignoreupdate)
296 296 finally:
297 297 repo._repo.lfstatus = False
298 298
299 299 def overridelog(orig, ui, repo, *pats, **opts):
300 300 def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
301 301 default='relpath', badfn=None):
302 302 """Matcher that merges root directory with .hglf, suitable for log.
303 303 It is still possible to match .hglf directly.
304 304 For any listed files run log on the standin too.
305 305 matchfn tries both the given filename and with .hglf stripped.
306 306 """
307 307 if opts is None:
308 308 opts = {}
309 309 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
310 310 badfn=badfn)
311 311 m, p = copy.copy(matchandpats)
312 312
313 313 if m.always():
314 314 # We want to match everything anyway, so there's no benefit trying
315 315 # to add standins.
316 316 return matchandpats
317 317
318 318 pats = set(p)
319 319
320 320 def fixpats(pat, tostandin=lfutil.standin):
321 321 if pat.startswith('set:'):
322 322 return pat
323 323
324 324 kindpat = matchmod._patsplit(pat, None)
325 325
326 326 if kindpat[0] is not None:
327 327 return kindpat[0] + ':' + tostandin(kindpat[1])
328 328 return tostandin(kindpat[1])
329 329
330 330 if m._cwd:
331 331 hglf = lfutil.shortname
332 332 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
333 333
334 334 def tostandin(f):
335 335 # The file may already be a standin, so truncate the back
336 336 # prefix and test before mangling it. This avoids turning
337 337 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
338 338 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
339 339 return f
340 340
341 341 # An absolute path is from outside the repo, so truncate the
342 342 # path to the root before building the standin. Otherwise cwd
343 343 # is somewhere in the repo, relative to root, and needs to be
344 344 # prepended before building the standin.
345 345 if os.path.isabs(m._cwd):
346 346 f = f[len(back):]
347 347 else:
348 348 f = m._cwd + '/' + f
349 349 return back + lfutil.standin(f)
350 350
351 351 pats.update(fixpats(f, tostandin) for f in p)
352 352 else:
353 353 def tostandin(f):
354 354 if lfutil.isstandin(f):
355 355 return f
356 356 return lfutil.standin(f)
357 357 pats.update(fixpats(f, tostandin) for f in p)
358 358
359 359 for i in range(0, len(m._files)):
360 360 # Don't add '.hglf' to m.files, since that is already covered by '.'
361 361 if m._files[i] == '.':
362 362 continue
363 363 standin = lfutil.standin(m._files[i])
364 364 # If the "standin" is a directory, append instead of replace to
365 365 # support naming a directory on the command line with only
366 366 # largefiles. The original directory is kept to support normal
367 367 # files.
368 368 if standin in ctx:
369 369 m._files[i] = standin
370 370 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
371 371 m._files.append(standin)
372 372
373 373 m._fileroots = set(m._files)
374 374 m._always = False
375 375 origmatchfn = m.matchfn
376 376 def lfmatchfn(f):
377 377 lf = lfutil.splitstandin(f)
378 378 if lf is not None and origmatchfn(lf):
379 379 return True
380 380 r = origmatchfn(f)
381 381 return r
382 382 m.matchfn = lfmatchfn
383 383
384 384 ui.debug('updated patterns: %s\n' % sorted(pats))
385 385 return m, pats
386 386
387 387 # For hg log --patch, the match object is used in two different senses:
388 388 # (1) to determine what revisions should be printed out, and
389 389 # (2) to determine what files to print out diffs for.
390 390 # The magic matchandpats override should be used for case (1) but not for
391 391 # case (2).
392 392 def overridemakelogfilematcher(repo, pats, opts, badfn=None):
393 393 wctx = repo[None]
394 394 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
395 395 return lambda rev: match
396 396
397 397 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
398 398 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
399 399 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
400 400
401 401 try:
402 402 return orig(ui, repo, *pats, **opts)
403 403 finally:
404 404 restorematchandpatsfn()
405 405 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
406 406
407 407 def overrideverify(orig, ui, repo, *pats, **opts):
408 408 large = opts.pop('large', False)
409 409 all = opts.pop('lfa', False)
410 410 contents = opts.pop('lfc', False)
411 411
412 412 result = orig(ui, repo, *pats, **opts)
413 413 if large or all or contents:
414 414 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
415 415 return result
416 416
417 417 def overridedebugstate(orig, ui, repo, *pats, **opts):
418 418 large = opts.pop('large', False)
419 419 if large:
420 420 class fakerepo(object):
421 421 dirstate = lfutil.openlfdirstate(ui, repo)
422 422 orig(ui, fakerepo, *pats, **opts)
423 423 else:
424 424 orig(ui, repo, *pats, **opts)
425 425
426 426 # Before starting the manifest merge, merge.updates will call
427 427 # _checkunknownfile to check if there are any files in the merged-in
428 428 # changeset that collide with unknown files in the working copy.
429 429 #
430 430 # The largefiles are seen as unknown, so this prevents us from merging
431 431 # in a file 'foo' if we already have a largefile with the same name.
432 432 #
433 433 # The overridden function filters the unknown files by removing any
434 434 # largefiles. This makes the merge proceed and we can then handle this
435 435 # case further in the overridden calculateupdates function below.
436 436 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
437 437 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
438 438 return False
439 439 return origfn(repo, wctx, mctx, f, f2)
440 440
441 441 # The manifest merge handles conflicts on the manifest level. We want
442 442 # to handle changes in largefile-ness of files at this level too.
443 443 #
444 444 # The strategy is to run the original calculateupdates and then process
445 445 # the action list it outputs. There are two cases we need to deal with:
446 446 #
447 447 # 1. Normal file in p1, largefile in p2. Here the largefile is
448 448 # detected via its standin file, which will enter the working copy
449 449 # with a "get" action. It is not "merge" since the standin is all
450 450 # Mercurial is concerned with at this level -- the link to the
451 451 # existing normal file is not relevant here.
452 452 #
453 453 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
454 454 # since the largefile will be present in the working copy and
455 455 # different from the normal file in p2. Mercurial therefore
456 456 # triggers a merge action.
457 457 #
458 458 # In both cases, we prompt the user and emit new actions to either
459 459 # remove the standin (if the normal file was kept) or to remove the
460 460 # normal file and get the standin (if the largefile was kept). The
461 461 # default prompt answer is to use the largefile version since it was
462 462 # presumably changed on purpose.
463 463 #
464 464 # Finally, the merge.applyupdates function will then take care of
465 465 # writing the files into the working copy and lfcommands.updatelfiles
466 466 # will update the largefiles.
467 467 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
468 468 acceptremote, *args, **kwargs):
469 469 overwrite = force and not branchmerge
470 470 actions, diverge, renamedelete = origfn(
471 471 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)
472 472
473 473 if overwrite:
474 474 return actions, diverge, renamedelete
475 475
476 476 # Convert to dictionary with filename as key and action as value.
477 477 lfiles = set()
478 478 for f in actions:
479 479 splitstandin = lfutil.splitstandin(f)
480 480 if splitstandin in p1:
481 481 lfiles.add(splitstandin)
482 482 elif lfutil.standin(f) in p1:
483 483 lfiles.add(f)
484 484
485 485 for lfile in sorted(lfiles):
486 486 standin = lfutil.standin(lfile)
487 487 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
488 488 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
489 489 if sm in ('g', 'dc') and lm != 'r':
490 490 if sm == 'dc':
491 491 f1, f2, fa, move, anc = sargs
492 492 sargs = (p2[f2].flags(), False)
493 493 # Case 1: normal file in the working copy, largefile in
494 494 # the second parent
495 495 usermsg = _('remote turned local normal file %s into a largefile\n'
496 496 'use (l)argefile or keep (n)ormal file?'
497 497 '$$ &Largefile $$ &Normal file') % lfile
498 498 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
499 499 actions[lfile] = ('r', None, 'replaced by standin')
500 500 actions[standin] = ('g', sargs, 'replaces standin')
501 501 else: # keep local normal file
502 502 actions[lfile] = ('k', None, 'replaces standin')
503 503 if branchmerge:
504 504 actions[standin] = ('k', None, 'replaced by non-standin')
505 505 else:
506 506 actions[standin] = ('r', None, 'replaced by non-standin')
507 507 elif lm in ('g', 'dc') and sm != 'r':
508 508 if lm == 'dc':
509 509 f1, f2, fa, move, anc = largs
510 510 largs = (p2[f2].flags(), False)
511 511 # Case 2: largefile in the working copy, normal file in
512 512 # the second parent
513 513 usermsg = _('remote turned local largefile %s into a normal file\n'
514 514 'keep (l)argefile or use (n)ormal file?'
515 515 '$$ &Largefile $$ &Normal file') % lfile
516 516 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
517 517 if branchmerge:
518 518 # largefile can be restored from standin safely
519 519 actions[lfile] = ('k', None, 'replaced by standin')
520 520 actions[standin] = ('k', None, 'replaces standin')
521 521 else:
522 522 # "lfile" should be marked as "removed" without
523 523 # removal of itself
524 524 actions[lfile] = ('lfmr', None,
525 525 'forget non-standin largefile')
526 526
527 527 # linear-merge should treat this largefile as 're-added'
528 528 actions[standin] = ('a', None, 'keep standin')
529 529 else: # pick remote normal file
530 530 actions[lfile] = ('g', largs, 'replaces standin')
531 531 actions[standin] = ('r', None, 'replaced by non-standin')
532 532
533 533 return actions, diverge, renamedelete
534 534
535 535 def mergerecordupdates(orig, repo, actions, branchmerge):
536 536 if 'lfmr' in actions:
537 537 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
538 538 for lfile, args, msg in actions['lfmr']:
539 539 # this should be executed before 'orig', to execute 'remove'
540 540 # before all other actions
541 541 repo.dirstate.remove(lfile)
542 542 # make sure lfile doesn't get synclfdirstate'd as normal
543 543 lfdirstate.add(lfile)
544 544 lfdirstate.write()
545 545
546 546 return orig(repo, actions, branchmerge)
547 547
548 548 # Override filemerge to prompt the user about how they wish to merge
549 549 # largefiles. This will handle identical edits without prompting the user.
550 550 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
551 551 labels=None):
552 552 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
553 553 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
554 554 labels=labels)
555 555
556 556 ahash = fca.data().strip().lower()
557 557 dhash = fcd.data().strip().lower()
558 558 ohash = fco.data().strip().lower()
559 559 if (ohash != ahash and
560 560 ohash != dhash and
561 561 (dhash == ahash or
562 562 repo.ui.promptchoice(
563 563 _('largefile %s has a merge conflict\nancestor was %s\n'
564 564 'keep (l)ocal %s or\ntake (o)ther %s?'
565 565 '$$ &Local $$ &Other') %
566 566 (lfutil.splitstandin(orig), ahash, dhash, ohash),
567 567 0) == 1)):
568 568 repo.wwrite(fcd.path(), fco.data(), fco.flags())
569 569 return True, 0, False
570 570
571 571 def copiespathcopies(orig, ctx1, ctx2, match=None):
572 572 copies = orig(ctx1, ctx2, match=match)
573 573 updated = {}
574 574
575 575 for k, v in copies.iteritems():
576 576 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
577 577
578 578 return updated
579 579
580 580 # Copy first changes the matchers to match standins instead of
581 581 # largefiles. Then it overrides util.copyfile; in that function it
582 582 # checks whether the destination largefile already exists. It also keeps a
583 583 # list of copied files so that the largefiles can be copied and the
584 584 # dirstate updated.
585 585 def overridecopy(orig, ui, repo, pats, opts, rename=False):
586 586 # doesn't remove largefile on rename
587 587 if len(pats) < 2:
588 588 # this isn't legal, let the original function deal with it
589 589 return orig(ui, repo, pats, opts, rename)
590 590
591 591 # This could copy both lfiles and normal files in one command,
592 592 # but we don't want to do that. First replace their matcher to
593 593 # only match normal files and run it, then replace it to just
594 594 # match largefiles and run it again.
595 595 nonormalfiles = False
596 596 nolfiles = False
597 597 installnormalfilesmatchfn(repo[None].manifest())
598 598 try:
599 599 result = orig(ui, repo, pats, opts, rename)
600 600 except error.Abort as e:
601 601 if str(e) != _('no files to copy'):
602 602 raise e
603 603 else:
604 604 nonormalfiles = True
605 605 result = 0
606 606 finally:
607 607 restorematchfn()
608 608
609 609 # The first rename can cause our current working directory to be removed.
610 610 # In that case there is nothing left to copy/rename so just quit.
611 611 try:
612 612 repo.getcwd()
613 613 except OSError:
614 614 return result
615 615
616 616 def makestandin(relpath):
617 617 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
618 618 return repo.wvfs.join(lfutil.standin(path))
619 619
620 620 fullpats = scmutil.expandpats(pats)
621 621 dest = fullpats[-1]
622 622
623 623 if os.path.isdir(dest):
624 624 if not os.path.isdir(makestandin(dest)):
625 625 os.makedirs(makestandin(dest))
626 626
627 627 try:
628 628 # When we call orig below it creates the standins but we don't add
629 629     # them to the dirstate until later, so lock during that time.
630 630 wlock = repo.wlock()
631 631
632 632 manifest = repo[None].manifest()
633 633 def overridematch(ctx, pats=(), opts=None, globbed=False,
634 634 default='relpath', badfn=None):
635 635 if opts is None:
636 636 opts = {}
637 637 newpats = []
638 638 # The patterns were previously mangled to add the standin
639 639 # directory; we need to remove that now
640 640 for pat in pats:
641 641 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
642 642 newpats.append(pat.replace(lfutil.shortname, ''))
643 643 else:
644 644 newpats.append(pat)
645 645 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
646 646 m = copy.copy(match)
647 647 lfile = lambda f: lfutil.standin(f) in manifest
648 648 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
649 649 m._fileroots = set(m._files)
650 650 origmatchfn = m.matchfn
651 651 def matchfn(f):
652 652 lfile = lfutil.splitstandin(f)
653 653 return (lfile is not None and
654 654 (f in manifest) and
655 655 origmatchfn(lfile) or
656 656 None)
657 657 m.matchfn = matchfn
658 658 return m
659 659 oldmatch = installmatchfn(overridematch)
660 660 listpats = []
661 661 for pat in pats:
662 662 if matchmod.patkind(pat) is not None:
663 663 listpats.append(pat)
664 664 else:
665 665 listpats.append(makestandin(pat))
666 666
667 667 try:
668 668 origcopyfile = util.copyfile
669 669 copiedfiles = []
670 670 def overridecopyfile(src, dest):
671 671 if (lfutil.shortname in src and
672 672 dest.startswith(repo.wjoin(lfutil.shortname))):
673 673 destlfile = dest.replace(lfutil.shortname, '')
674 674 if not opts['force'] and os.path.exists(destlfile):
675 675 raise IOError('',
676 676 _('destination largefile already exists'))
677 677 copiedfiles.append((src, dest))
678 678 origcopyfile(src, dest)
679 679
680 680 util.copyfile = overridecopyfile
681 681 result += orig(ui, repo, listpats, opts, rename)
682 682 finally:
683 683 util.copyfile = origcopyfile
684 684
685 685 lfdirstate = lfutil.openlfdirstate(ui, repo)
686 686 for (src, dest) in copiedfiles:
687 687 if (lfutil.shortname in src and
688 688 dest.startswith(repo.wjoin(lfutil.shortname))):
689 689 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
690 690 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
691 691 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
692 692 if not os.path.isdir(destlfiledir):
693 693 os.makedirs(destlfiledir)
694 694 if rename:
695 695 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
696 696
697 697 # The file is gone, but this deletes any empty parent
698 698 # directories as a side-effect.
699 699 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
700 700 lfdirstate.remove(srclfile)
701 701 else:
702 702 util.copyfile(repo.wjoin(srclfile),
703 703 repo.wjoin(destlfile))
704 704
705 705 lfdirstate.add(destlfile)
706 706 lfdirstate.write()
707 707 except error.Abort as e:
708 708 if str(e) != _('no files to copy'):
709 709 raise e
710 710 else:
711 711 nolfiles = True
712 712 finally:
713 713 restorematchfn()
714 714 wlock.release()
715 715
716 716 if nolfiles and nonormalfiles:
717 717 raise error.Abort(_('no files to copy'))
718 718
719 719 return result
720 720
721 721 # When the user calls revert, we have to be careful to not revert any
722 722 # changes to other largefiles accidentally. This means we have to keep
723 723 # track of the largefiles that are being reverted so we only pull down
724 724 # the necessary largefiles.
725 725 #
726 726 # Standins are only updated (to match the hash of largefiles) before
727 727 # commits. Update the standins then run the original revert, changing
728 728 # the matcher to hit standins instead of largefiles. Based on the
729 729 # resulting standins update the largefiles.
730 730 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
731 731 # Because we put the standins in a bad state (by updating them)
732 732     # and then return them to a correct state, we need to lock to
733 733 # prevent others from changing them in their incorrect state.
734 734 with repo.wlock():
735 735 lfdirstate = lfutil.openlfdirstate(ui, repo)
736 736 s = lfutil.lfdirstatestatus(lfdirstate, repo)
737 737 lfdirstate.write()
738 738 for lfile in s.modified:
739 lfutil.updatestandin(repo, lfutil.standin(lfile))
739 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
740 740 for lfile in s.deleted:
741 741 fstandin = lfutil.standin(lfile)
742 742 if (repo.wvfs.exists(fstandin)):
743 743 repo.wvfs.unlink(fstandin)
744 744
745 745 oldstandins = lfutil.getstandinsstate(repo)
746 746
747 747 def overridematch(mctx, pats=(), opts=None, globbed=False,
748 748 default='relpath', badfn=None):
749 749 if opts is None:
750 750 opts = {}
751 751 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
752 752 m = copy.copy(match)
753 753
754 754 # revert supports recursing into subrepos, and though largefiles
755 755 # currently doesn't work correctly in that case, this match is
756 756 # called, so the lfdirstate above may not be the correct one for
757 757 # this invocation of match.
758 758 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
759 759 False)
760 760
761 761 wctx = repo[None]
762 762 matchfiles = []
763 763 for f in m._files:
764 764 standin = lfutil.standin(f)
765 765 if standin in ctx or standin in mctx:
766 766 matchfiles.append(standin)
767 767 elif standin in wctx or lfdirstate[f] == 'r':
768 768 continue
769 769 else:
770 770 matchfiles.append(f)
771 771 m._files = matchfiles
772 772 m._fileroots = set(m._files)
773 773 origmatchfn = m.matchfn
774 774 def matchfn(f):
775 775 lfile = lfutil.splitstandin(f)
776 776 if lfile is not None:
777 777 return (origmatchfn(lfile) and
778 778 (f in ctx or f in mctx))
779 779 return origmatchfn(f)
780 780 m.matchfn = matchfn
781 781 return m
782 782 oldmatch = installmatchfn(overridematch)
783 783 try:
784 784 orig(ui, repo, ctx, parents, *pats, **opts)
785 785 finally:
786 786 restorematchfn()
787 787
788 788 newstandins = lfutil.getstandinsstate(repo)
789 789 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
790 790 # lfdirstate should be 'normallookup'-ed for updated files,
791 791 # because reverting doesn't touch dirstate for 'normal' files
792 792     # when the target revision is explicitly specified: in such a case,
793 793     # 'n' and a valid timestamp in dirstate don't ensure 'clean'
794 794 # of target (standin) file.
795 795 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
796 796 normallookup=True)
797 797
798 798 # after pulling changesets, we need to take some extra care to get
799 799 # largefiles updated remotely
800 800 def overridepull(orig, ui, repo, source=None, **opts):
801 801 revsprepull = len(repo)
802 802 if not source:
803 803 source = 'default'
804 804 repo.lfpullsource = source
805 805 result = orig(ui, repo, source, **opts)
806 806 revspostpull = len(repo)
807 807 lfrevs = opts.get('lfrev', [])
808 808 if opts.get('all_largefiles'):
809 809 lfrevs.append('pulled()')
810 810 if lfrevs and revspostpull > revsprepull:
811 811 numcached = 0
812 812 repo.firstpulled = revsprepull # for pulled() revset expression
813 813 try:
814 814 for rev in scmutil.revrange(repo, lfrevs):
815 815 ui.note(_('pulling largefiles for revision %s\n') % rev)
816 816 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
817 817 numcached += len(cached)
818 818 finally:
819 819 del repo.firstpulled
820 820 ui.status(_("%d largefiles cached\n") % numcached)
821 821 return result
822 822
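# Typical command-line usage of the pull override above, assuming the --lfrev
# and --all-largefiles options that this extension adds to "hg pull":
#
#   hg pull --lfrev "pulled()"                         # what --all-largefiles expands to
#   hg pull --lfrev "head(pulled()) and not closed()"  # only new branch heads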
823 823 def overridepush(orig, ui, repo, *args, **kwargs):
824 824 """Override push command and store --lfrev parameters in opargs"""
825 825 lfrevs = kwargs.pop('lfrev', None)
826 826 if lfrevs:
827 827 opargs = kwargs.setdefault('opargs', {})
828 828 opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
829 829 return orig(ui, repo, *args, **kwargs)
830 830
831 831 def exchangepushoperation(orig, *args, **kwargs):
832 832 """Override pushoperation constructor and store lfrevs parameter"""
833 833 lfrevs = kwargs.pop('lfrevs', None)
834 834 pushop = orig(*args, **kwargs)
835 835 pushop.lfrevs = lfrevs
836 836 return pushop
837 837
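# A minimal sketch, assuming the standard extensions.wrapcommand and
# extensions.wrapfunction helpers, of how the two push overrides above would
# typically be wired up from the extension's setup code (that setup code is
# not part of this file):
#
#   from mercurial import commands, exchange, extensions
#
#   entry = extensions.wrapcommand(commands.table, 'push', overridepush)
#   entry[1].append(('', 'lfrev', [],
#                    _('upload largefiles for these revisions')))
#   extensions.wrapfunction(exchange, 'pushoperation', exchangepushoperation)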
838 838 revsetpredicate = registrar.revsetpredicate()
839 839
840 840 @revsetpredicate('pulled()')
841 841 def pulledrevsetsymbol(repo, subset, x):
842 842 """Changesets that just has been pulled.
843 843
844 844 Only available with largefiles, inside "hg pull --lfrev" expressions.
845 845
846 846 .. container:: verbose
847 847
848 848 Some examples:
849 849
850 850 - pull largefiles for all new changesets::
851 851
852 852 hg pull --lfrev "pulled()"
853 853
854 854 - pull largefiles for all new branch heads::
855 855
856 856 hg pull --lfrev "head(pulled()) and not closed()"
857 857
858 858 """
859 859
860 860 try:
861 861 firstpulled = repo.firstpulled
862 862 except AttributeError:
863 863 raise error.Abort(_("pulled() only available in --lfrev"))
864 864 return smartset.baseset([r for r in subset if r >= firstpulled])
865 865
866 866 def overrideclone(orig, ui, source, dest=None, **opts):
867 867 d = dest
868 868 if d is None:
869 869 d = hg.defaultdest(source)
870 870 if opts.get('all_largefiles') and not hg.islocal(d):
871 871 raise error.Abort(_(
872 872 '--all-largefiles is incompatible with non-local destination %s') %
873 873 d)
874 874
875 875 return orig(ui, source, dest, **opts)
876 876
877 877 def hgclone(orig, ui, opts, *args, **kwargs):
878 878 result = orig(ui, opts, *args, **kwargs)
879 879
880 880 if result is not None:
881 881 sourcerepo, destrepo = result
882 882 repo = destrepo.local()
883 883
884 884 # When cloning to a remote repo (like through SSH), no repo is available
885 885 # from the peer. Therefore the largefiles can't be downloaded and the
886 886 # hgrc can't be updated.
887 887 if not repo:
888 888 return result
889 889
890 890 # If largefiles is required for this repo, permanently enable it locally
891 891 if 'largefiles' in repo.requirements:
892 892 with repo.vfs('hgrc', 'a', text=True) as fp:
893 893 fp.write('\n[extensions]\nlargefiles=\n')
894 894
895 895 # Caching is implicitly limited to the 'rev' option, since the dest repo was
896 896 # truncated at that point. The user may expect a download count with
897 897 # this option, so attempt the download whether or not this is a largefile repo.
898 898 if opts.get('all_largefiles'):
899 899 success, missing = lfcommands.downloadlfiles(ui, repo, None)
900 900
901 901 if missing != 0:
902 902 return None
903 903
904 904 return result
905 905
906 906 def overriderebase(orig, ui, repo, **opts):
907 907 if not util.safehasattr(repo, '_largefilesenabled'):
908 908 return orig(ui, repo, **opts)
909 909
910 910 resuming = opts.get('continue')
911 911 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
912 912 repo._lfstatuswriters.append(lambda *msg, **opts: None)
913 913 try:
914 914 return orig(ui, repo, **opts)
915 915 finally:
916 916 repo._lfstatuswriters.pop()
917 917 repo._lfcommithooks.pop()
918 918
919 919 def overridearchivecmd(orig, ui, repo, dest, **opts):
920 920 repo.unfiltered().lfstatus = True
921 921
922 922 try:
923 923 return orig(ui, repo.unfiltered(), dest, **opts)
924 924 finally:
925 925 repo.unfiltered().lfstatus = False
926 926
927 927 def hgwebarchive(orig, web, req, tmpl):
928 928 web.repo.lfstatus = True
929 929
930 930 try:
931 931 return orig(web, req, tmpl)
932 932 finally:
933 933 web.repo.lfstatus = False
934 934
935 935 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
936 936 prefix='', mtime=None, subrepos=None):
937 937 # For some reason setting repo.lfstatus in hgwebarchive only changes the
938 938 # unfiltered repo's attr, so check that as well.
939 939 if not repo.lfstatus and not repo.unfiltered().lfstatus:
940 940 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
941 941 subrepos)
942 942
943 943 # No need to lock because we are only reading history and
944 944 # largefile caches, neither of which are modified.
945 945 if node is not None:
946 946 lfcommands.cachelfiles(repo.ui, repo, node)
947 947
948 948 if kind not in archival.archivers:
949 949 raise error.Abort(_("unknown archive type '%s'") % kind)
950 950
951 951 ctx = repo[node]
952 952
953 953 if kind == 'files':
954 954 if prefix:
955 955 raise error.Abort(
956 956 _('cannot give prefix when archiving to files'))
957 957 else:
958 958 prefix = archival.tidyprefix(dest, kind, prefix)
959 959
960 960 def write(name, mode, islink, getdata):
961 961 if matchfn and not matchfn(name):
962 962 return
963 963 data = getdata()
964 964 if decode:
965 965 data = repo.wwritedata(name, data)
966 966 archiver.addfile(prefix + name, mode, islink, data)
967 967
968 968 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
969 969
970 970 if repo.ui.configbool("ui", "archivemeta", True):
971 971 write('.hg_archival.txt', 0o644, False,
972 972 lambda: archival.buildmetadata(ctx))
973 973
974 974 for f in ctx:
975 975 ff = ctx.flags(f)
976 976 getdata = ctx[f].data
977 977 lfile = lfutil.splitstandin(f)
978 978 if lfile is not None:
979 979 if node is not None:
980 980 path = lfutil.findfile(repo, getdata().strip())
981 981
982 982 if path is None:
983 983 raise error.Abort(
984 984 _('largefile %s not found in repo store or system cache')
985 985 % lfile)
986 986 else:
987 987 path = lfile
988 988
989 989 f = lfile
990 990
991 991 getdata = lambda: util.readfile(path)
992 992 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
993 993
994 994 if subrepos:
995 995 for subpath in sorted(ctx.substate):
996 996 sub = ctx.workingsub(subpath)
997 997 submatch = matchmod.subdirmatcher(subpath, matchfn)
998 998 sub._repo.lfstatus = True
999 999 sub.archive(archiver, prefix, submatch)
1000 1000
1001 1001 archiver.done()
1002 1002
1003 1003 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1004 1004 if not repo._repo.lfstatus:
1005 1005 return orig(repo, archiver, prefix, match, decode)
1006 1006
1007 1007 repo._get(repo._state + ('hg',))
1008 1008 rev = repo._state[1]
1009 1009 ctx = repo._repo[rev]
1010 1010
1011 1011 if ctx.node() is not None:
1012 1012 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1013 1013
1014 1014 def write(name, mode, islink, getdata):
1015 1015 # At this point, the standin has been replaced with the largefile name,
1016 1016 # so the normal matcher works here without the lfutil variants.
1017 1017 if match and not match(f):
1018 1018 return
1019 1019 data = getdata()
1020 1020 if decode:
1021 1021 data = repo._repo.wwritedata(name, data)
1022 1022
1023 1023 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1024 1024
1025 1025 for f in ctx:
1026 1026 ff = ctx.flags(f)
1027 1027 getdata = ctx[f].data
1028 1028 lfile = lfutil.splitstandin(f)
1029 1029 if lfile is not None:
1030 1030 if ctx.node() is not None:
1031 1031 path = lfutil.findfile(repo._repo, getdata().strip())
1032 1032
1033 1033 if path is None:
1034 1034 raise error.Abort(
1035 1035 _('largefile %s not found in repo store or system cache')
1036 1036 % lfile)
1037 1037 else:
1038 1038 path = lfile
1039 1039
1040 1040 f = lfile
1041 1041
1042 1042 getdata = lambda: util.readfile(os.path.join(prefix, path))
1043 1043
1044 1044 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1045 1045
1046 1046 for subpath in sorted(ctx.substate):
1047 1047 sub = ctx.workingsub(subpath)
1048 1048 submatch = matchmod.subdirmatcher(subpath, match)
1049 1049 sub._repo.lfstatus = True
1050 1050 sub.archive(archiver, prefix + repo._path + '/', submatch, decode)
1051 1051
1052 1052 # If a largefile is modified, the change is not reflected in its
1053 1053 # standin until a commit. cmdutil.bailifchanged() raises an exception
1054 1054 # if the repo has uncommitted changes. Wrap it to also check if
1055 1055 # largefiles were changed. This is used by bisect, backout and fetch.
1056 1056 def overridebailifchanged(orig, repo, *args, **kwargs):
1057 1057 orig(repo, *args, **kwargs)
1058 1058 repo.lfstatus = True
1059 1059 s = repo.status()
1060 1060 repo.lfstatus = False
1061 1061 if s.modified or s.added or s.removed or s.deleted:
1062 1062 raise error.Abort(_('uncommitted changes'))
1063 1063
1064 1064 def postcommitstatus(orig, repo, *args, **kwargs):
1065 1065 repo.lfstatus = True
1066 1066 try:
1067 1067 return orig(repo, *args, **kwargs)
1068 1068 finally:
1069 1069 repo.lfstatus = False
1070 1070
1071 1071 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1072 1072 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1073 1073 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1074 1074 m = composelargefilematcher(match, repo[None].manifest())
1075 1075
1076 1076 try:
1077 1077 repo.lfstatus = True
1078 1078 s = repo.status(match=m, clean=True)
1079 1079 finally:
1080 1080 repo.lfstatus = False
1081 1081 manifest = repo[None].manifest()
1082 1082 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1083 1083 forget = [f for f in forget if lfutil.standin(f) in manifest]
1084 1084
1085 1085 for f in forget:
1086 1086 fstandin = lfutil.standin(f)
1087 1087 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1088 1088 ui.warn(_('not removing %s: file is already untracked\n')
1089 1089 % m.rel(f))
1090 1090 bad.append(f)
1091 1091
1092 1092 for f in forget:
1093 1093 if ui.verbose or not m.exact(f):
1094 1094 ui.status(_('removing %s\n') % m.rel(f))
1095 1095
1096 1096 # Need to lock because standin files are deleted then removed from the
1097 1097 # repository and we could race in-between.
1098 1098 with repo.wlock():
1099 1099 lfdirstate = lfutil.openlfdirstate(ui, repo)
1100 1100 for f in forget:
1101 1101 if lfdirstate[f] == 'a':
1102 1102 lfdirstate.drop(f)
1103 1103 else:
1104 1104 lfdirstate.remove(f)
1105 1105 lfdirstate.write()
1106 1106 standins = [lfutil.standin(f) for f in forget]
1107 1107 for f in standins:
1108 1108 repo.wvfs.unlinkpath(f, ignoremissing=True)
1109 1109 rejected = repo[None].forget(standins)
1110 1110
1111 1111 bad.extend(f for f in rejected if f in m.files())
1112 1112 forgot.extend(f for f in forget if f not in rejected)
1113 1113 return bad, forgot
1114 1114
1115 1115 def _getoutgoings(repo, other, missing, addfunc):
1116 1116 """get pairs of filename and largefile hash in outgoing revisions
1117 1117 in 'missing'.
1118 1118
1119 1119 largefiles already existing on the 'other' repository are ignored.
1120 1120
1121 1121 'addfunc' is invoked with each unique pair of filename and
1122 1122 largefile hash value.
1123 1123 """
1124 1124 knowns = set()
1125 1125 lfhashes = set()
1126 1126 def dedup(fn, lfhash):
1127 1127 k = (fn, lfhash)
1128 1128 if k not in knowns:
1129 1129 knowns.add(k)
1130 1130 lfhashes.add(lfhash)
1131 1131 lfutil.getlfilestoupload(repo, missing, dedup)
1132 1132 if lfhashes:
1133 1133 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1134 1134 for fn, lfhash in knowns:
1135 1135 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1136 1136 addfunc(fn, lfhash)
1137 1137
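# For illustration, a hypothetical caller of _getoutgoings() that only
# collects the unique (filename, hash) pairs still missing on the remote
# (the real callers below build richer structures for reporting):
#
#   pending = set()
#   _getoutgoings(repo, other, outgoing.missing,
#                 lambda fn, lfhash: pending.add((fn, lfhash)))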
1138 1138 def outgoinghook(ui, repo, other, opts, missing):
1139 1139 if opts.pop('large', None):
1140 1140 lfhashes = set()
1141 1141 if ui.debugflag:
1142 1142 toupload = {}
1143 1143 def addfunc(fn, lfhash):
1144 1144 if fn not in toupload:
1145 1145 toupload[fn] = []
1146 1146 toupload[fn].append(lfhash)
1147 1147 lfhashes.add(lfhash)
1148 1148 def showhashes(fn):
1149 1149 for lfhash in sorted(toupload[fn]):
1150 1150 ui.debug(' %s\n' % (lfhash))
1151 1151 else:
1152 1152 toupload = set()
1153 1153 def addfunc(fn, lfhash):
1154 1154 toupload.add(fn)
1155 1155 lfhashes.add(lfhash)
1156 1156 def showhashes(fn):
1157 1157 pass
1158 1158 _getoutgoings(repo, other, missing, addfunc)
1159 1159
1160 1160 if not toupload:
1161 1161 ui.status(_('largefiles: no files to upload\n'))
1162 1162 else:
1163 1163 ui.status(_('largefiles to upload (%d entities):\n')
1164 1164 % (len(lfhashes)))
1165 1165 for file in sorted(toupload):
1166 1166 ui.status(lfutil.splitstandin(file) + '\n')
1167 1167 showhashes(file)
1168 1168 ui.status('\n')
1169 1169
1170 1170 def summaryremotehook(ui, repo, opts, changes):
1171 1171 largeopt = opts.get('large', False)
1172 1172 if changes is None:
1173 1173 if largeopt:
1174 1174 return (False, True) # only outgoing check is needed
1175 1175 else:
1176 1176 return (False, False)
1177 1177 elif largeopt:
1178 1178 url, branch, peer, outgoing = changes[1]
1179 1179 if peer is None:
1180 1180 # i18n: column positioning for "hg summary"
1181 1181 ui.status(_('largefiles: (no remote repo)\n'))
1182 1182 return
1183 1183
1184 1184 toupload = set()
1185 1185 lfhashes = set()
1186 1186 def addfunc(fn, lfhash):
1187 1187 toupload.add(fn)
1188 1188 lfhashes.add(lfhash)
1189 1189 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1190 1190
1191 1191 if not toupload:
1192 1192 # i18n: column positioning for "hg summary"
1193 1193 ui.status(_('largefiles: (no files to upload)\n'))
1194 1194 else:
1195 1195 # i18n: column positioning for "hg summary"
1196 1196 ui.status(_('largefiles: %d entities for %d files to upload\n')
1197 1197 % (len(lfhashes), len(toupload)))
1198 1198
1199 1199 def overridesummary(orig, ui, repo, *pats, **opts):
1200 1200 try:
1201 1201 repo.lfstatus = True
1202 1202 orig(ui, repo, *pats, **opts)
1203 1203 finally:
1204 1204 repo.lfstatus = False
1205 1205
1206 1206 def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
1207 1207 similarity=None):
1208 1208 if opts is None:
1209 1209 opts = {}
1210 1210 if not lfutil.islfilesrepo(repo):
1211 1211 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1212 1212 # Get the list of missing largefiles so we can remove them
1213 1213 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1214 1214 unsure, s = lfdirstate.status(matchmod.always(repo.root, repo.getcwd()), [],
1215 1215 False, False, False)
1216 1216
1217 1217 # Call into the normal remove code, but let the original addremove handle
1218 1218 # the removal of the standin itself. Monkey patching here makes sure
1219 1219 # we don't remove the standin in the largefiles code, preventing a very
1220 1220 # confused state later.
1221 1221 if s.deleted:
1222 1222 m = copy.copy(matcher)
1223 1223
1224 1224 # The m._files and m._map attributes are not changed to the deleted list
1225 1225 # because that affects the m.exact() test, which in turn governs whether
1226 1226 # or not the file name is printed, and how. Simply limit the original
1227 1227 # matches to those in the deleted status list.
1228 1228 matchfn = m.matchfn
1229 1229 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1230 1230
1231 1231 removelargefiles(repo.ui, repo, True, m, **opts)
1232 1232 # Call into the normal add code, and any files that *should* be added as
1233 1233 # largefiles will be
1234 1234 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1235 1235 # Now that we've handled largefiles, hand off to the original addremove
1236 1236 # function to take care of the rest. Make sure it doesn't do anything with
1237 1237 # largefiles by passing a matcher that will ignore them.
1238 1238 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1239 1239 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1240 1240
1241 1241 # Calling purge with --all will cause the largefiles to be deleted.
1242 1242 # Override repo.status to prevent this from happening.
1243 1243 def overridepurge(orig, ui, repo, *dirs, **opts):
1244 1244 # XXX Monkey patching a repoview will not work. The assigned attribute will
1245 1245 # be set on the unfiltered repo, but we will only lookup attributes in the
1246 1246 # unfiltered repo if the lookup in the repoview object itself fails. As the
1247 1247 # monkey patched method exists on the repoview class the lookup will not
1248 1248 # fail. As a result, the original version will shadow the monkey patched
1249 1249 # one, defeating the monkey patch.
1250 1250 #
1251 1251 # As a workaround we use an unfiltered repo here. We should do something
1252 1252 # cleaner instead.
1253 1253 repo = repo.unfiltered()
1254 1254 oldstatus = repo.status
1255 1255 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1256 1256 clean=False, unknown=False, listsubrepos=False):
1257 1257 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1258 1258 listsubrepos)
1259 1259 lfdirstate = lfutil.openlfdirstate(ui, repo)
1260 1260 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1261 1261 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1262 1262 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1263 1263 unknown, ignored, r.clean)
1264 1264 repo.status = overridestatus
1265 1265 orig(ui, repo, *dirs, **opts)
1266 1266 repo.status = oldstatus
1267 1267 def overriderollback(orig, ui, repo, **opts):
1268 1268 with repo.wlock():
1269 1269 before = repo.dirstate.parents()
1270 1270 orphans = set(f for f in repo.dirstate
1271 1271 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1272 1272 result = orig(ui, repo, **opts)
1273 1273 after = repo.dirstate.parents()
1274 1274 if before == after:
1275 1275 return result # no need to restore standins
1276 1276
1277 1277 pctx = repo['.']
1278 1278 for f in repo.dirstate:
1279 1279 if lfutil.isstandin(f):
1280 1280 orphans.discard(f)
1281 1281 if repo.dirstate[f] == 'r':
1282 1282 repo.wvfs.unlinkpath(f, ignoremissing=True)
1283 1283 elif f in pctx:
1284 1284 fctx = pctx[f]
1285 1285 repo.wwrite(f, fctx.data(), fctx.flags())
1286 1286 else:
1287 1287 # content of standin is not so important in 'a',
1288 1288 # 'm' or 'n' (coming from the 2nd parent) cases
1289 1289 lfutil.writestandin(repo, f, '', False)
1290 1290 for standin in orphans:
1291 1291 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1292 1292
1293 1293 lfdirstate = lfutil.openlfdirstate(ui, repo)
1294 1294 orphans = set(lfdirstate)
1295 1295 lfiles = lfutil.listlfiles(repo)
1296 1296 for file in lfiles:
1297 1297 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1298 1298 orphans.discard(file)
1299 1299 for lfile in orphans:
1300 1300 lfdirstate.drop(lfile)
1301 1301 lfdirstate.write()
1302 1302 return result
1303 1303
1304 1304 def overridetransplant(orig, ui, repo, *revs, **opts):
1305 1305 resuming = opts.get('continue')
1306 1306 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1307 1307 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1308 1308 try:
1309 1309 result = orig(ui, repo, *revs, **opts)
1310 1310 finally:
1311 1311 repo._lfstatuswriters.pop()
1312 1312 repo._lfcommithooks.pop()
1313 1313 return result
1314 1314
1315 1315 def overridecat(orig, ui, repo, file1, *pats, **opts):
1316 1316 ctx = scmutil.revsingle(repo, opts.get('rev'))
1317 1317 err = 1
1318 1318 notbad = set()
1319 1319 m = scmutil.match(ctx, (file1,) + pats, opts)
1320 1320 origmatchfn = m.matchfn
1321 1321 def lfmatchfn(f):
1322 1322 if origmatchfn(f):
1323 1323 return True
1324 1324 lf = lfutil.splitstandin(f)
1325 1325 if lf is None:
1326 1326 return False
1327 1327 notbad.add(lf)
1328 1328 return origmatchfn(lf)
1329 1329 m.matchfn = lfmatchfn
1330 1330 origbadfn = m.bad
1331 1331 def lfbadfn(f, msg):
1332 1332 if not f in notbad:
1333 1333 origbadfn(f, msg)
1334 1334 m.bad = lfbadfn
1335 1335
1336 1336 origvisitdirfn = m.visitdir
1337 1337 def lfvisitdirfn(dir):
1338 1338 if dir == lfutil.shortname:
1339 1339 return True
1340 1340 ret = origvisitdirfn(dir)
1341 1341 if ret:
1342 1342 return ret
1343 1343 lf = lfutil.splitstandin(dir)
1344 1344 if lf is None:
1345 1345 return False
1346 1346 return origvisitdirfn(lf)
1347 1347 m.visitdir = lfvisitdirfn
1348 1348
1349 1349 for f in ctx.walk(m):
1350 1350 with cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1351 1351 pathname=f) as fp:
1352 1352 lf = lfutil.splitstandin(f)
1353 1353 if lf is None or origmatchfn(f):
1354 1354 # duplicating unreachable code from commands.cat
1355 1355 data = ctx[f].data()
1356 1356 if opts.get('decode'):
1357 1357 data = repo.wwritedata(f, data)
1358 1358 fp.write(data)
1359 1359 else:
1360 1360 hash = lfutil.readstandin(repo, lf, ctx)
1361 1361 if not lfutil.inusercache(repo.ui, hash):
1362 1362 store = storefactory.openstore(repo)
1363 1363 success, missing = store.get([(lf, hash)])
1364 1364 if len(success) != 1:
1365 1365 raise error.Abort(
1366 1366 _('largefile %s is not in cache and could not be '
1367 1367 'downloaded') % lf)
1368 1368 path = lfutil.usercachepath(repo.ui, hash)
1369 1369 with open(path, "rb") as fpin:
1370 1370 for chunk in util.filechunkiter(fpin):
1371 1371 fp.write(chunk)
1372 1372 err = 0
1373 1373 return err
1374 1374
1375 1375 def mergeupdate(orig, repo, node, branchmerge, force,
1376 1376 *args, **kwargs):
1377 1377 matcher = kwargs.get('matcher', None)
1378 1378 # note if this is a partial update
1379 1379 partial = matcher and not matcher.always()
1380 1380 with repo.wlock():
1381 1381 # branch |       |         |
1382 1382 #  merge | force | partial | action
1383 1383 # -------+-------+---------+--------------
1384 1384 #    x   |   x   |    x    | linear-merge
1385 1385 #    o   |   x   |    x    | branch-merge
1386 1386 #    x   |   o   |    x    | overwrite (as clean update)
1387 1387 #    o   |   o   |    x    | force-branch-merge (*1)
1388 1388 #    x   |   x   |    o    |   (*)
1389 1389 #    o   |   x   |    o    |   (*)
1390 1390 #    x   |   o   |    o    | overwrite (as revert)
1391 1391 #    o   |   o   |    o    |   (*)
1392 1392 #
1393 1393 # (*) don't care
1394 1394 # (*1) deprecated, but used internally (e.g.: "rebase --collapse")
1395 1395
1396 1396 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1397 1397 unsure, s = lfdirstate.status(matchmod.always(repo.root,
1398 1398 repo.getcwd()),
1399 1399 [], False, True, False)
1400 1400 oldclean = set(s.clean)
1401 1401 pctx = repo['.']
1402 1402 dctx = repo[node]
1403 1403 for lfile in unsure + s.modified:
1404 1404 lfileabs = repo.wvfs.join(lfile)
1405 1405 if not repo.wvfs.exists(lfileabs):
1406 1406 continue
1407 1407 lfhash = lfutil.hashfile(lfileabs)
1408 1408 standin = lfutil.standin(lfile)
1409 1409 lfutil.writestandin(repo, standin, lfhash,
1410 1410 lfutil.getexecutable(lfileabs))
1411 1411 if (standin in pctx and
1412 1412 lfhash == lfutil.readstandin(repo, lfile, pctx)):
1413 1413 oldclean.add(lfile)
1414 1414 for lfile in s.added:
1415 1415 fstandin = lfutil.standin(lfile)
1416 1416 if fstandin not in dctx:
1417 1417 # in this case, content of standin file is meaningless
1418 1418 # (in dctx, lfile is unknown, or normal file)
1419 1419 continue
1420 lfutil.updatestandin(repo, fstandin)
1420 lfutil.updatestandin(repo, lfile, fstandin)
1421 1421 # mark all clean largefiles as dirty, just in case the update gets
1422 1422 # interrupted before largefiles and lfdirstate are synchronized
1423 1423 for lfile in oldclean:
1424 1424 lfdirstate.normallookup(lfile)
1425 1425 lfdirstate.write()
1426 1426
1427 1427 oldstandins = lfutil.getstandinsstate(repo)
1428 1428
1429 1429 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1430 1430
1431 1431 newstandins = lfutil.getstandinsstate(repo)
1432 1432 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1433 1433
1434 1434 # to avoid leaving all largefiles dirty (and thus rehashing them), mark
1435 1435 # all the ones that didn't change as clean
1436 1436 for lfile in oldclean.difference(filelist):
1437 1437 lfdirstate.normal(lfile)
1438 1438 lfdirstate.write()
1439 1439
1440 1440 if branchmerge or force or partial:
1441 1441 filelist.extend(s.deleted + s.removed)
1442 1442
1443 1443 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1444 1444 normallookup=partial)
1445 1445
1446 1446 return result
1447 1447
1448 1448 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1449 1449 result = orig(repo, files, *args, **kwargs)
1450 1450
1451 1451 filelist = []
1452 1452 for f in files:
1453 1453 lf = lfutil.splitstandin(f)
1454 1454 if lf is not None:
1455 1455 filelist.append(lf)
1456 1456 if filelist:
1457 1457 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1458 1458 printmessage=False, normallookup=True)
1459 1459
1460 1460 return result