largefiles: remove additional blank lines...
liscju
r29420:e5c91dc9 default
@@ -1,664 +1,662
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import hashlib
14 14 import os
15 15 import platform
16 16 import stat
17 17
18 18 from mercurial.i18n import _
19 19
20 20 from mercurial import (
21 21 dirstate,
22 22 error,
23 23 httpconnection,
24 24 match as matchmod,
25 25 node,
26 26 scmutil,
27 27 util,
28 28 )
29 29
30 30 shortname = '.hglf'
31 31 shortnameslash = shortname + '/'
32 32 longname = 'largefiles'
33 33
34
35 34 # -- Private worker functions ------------------------------------------
36 35
37 36 def getminsize(ui, assumelfiles, opt, default=10):
38 37 lfsize = opt
39 38 if not lfsize and assumelfiles:
40 39 lfsize = ui.config(longname, 'minsize', default=default)
41 40 if lfsize:
42 41 try:
43 42 lfsize = float(lfsize)
44 43 except ValueError:
45 44 raise error.Abort(_('largefiles: size must be number (not %s)\n')
46 45 % lfsize)
47 46 if lfsize is None:
48 47 raise error.Abort(_('minimum size for largefiles must be specified'))
49 48 return lfsize
50 49
51 50 def link(src, dest):
52 51 """Try to create hardlink - if that fails, efficiently make a copy."""
53 52 util.makedirs(os.path.dirname(dest))
54 53 try:
55 54 util.oslink(src, dest)
56 55 except OSError:
57 56 # if hardlinks fail, fallback on atomic copy
58 57 dst = util.atomictempfile(dest)
59 58 for chunk in util.filechunkiter(open(src, 'rb')):
60 59 dst.write(chunk)
61 60 dst.close()
62 61 os.chmod(dest, os.stat(src).st_mode)
63 62
64 63 def usercachepath(ui, hash):
65 64 '''Return the correct location in the "global" largefiles cache for a file
66 65 with the given hash.
67 66 This cache is used for sharing of largefiles across repositories - both
68 67 to preserve download bandwidth and storage space.'''
69 68 return os.path.join(_usercachedir(ui), hash)
70 69
71 70 def _usercachedir(ui):
72 71 '''Return the location of the "global" largefiles cache.'''
73 72 path = ui.configpath(longname, 'usercache', None)
74 73 if path:
75 74 return path
76 75 if os.name == 'nt':
77 76 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
78 77 if appdata:
79 78 return os.path.join(appdata, longname)
80 79 elif platform.system() == 'Darwin':
81 80 home = os.getenv('HOME')
82 81 if home:
83 82 return os.path.join(home, 'Library', 'Caches', longname)
84 83 elif os.name == 'posix':
85 84 path = os.getenv('XDG_CACHE_HOME')
86 85 if path:
87 86 return os.path.join(path, longname)
88 87 home = os.getenv('HOME')
89 88 if home:
90 89 return os.path.join(home, '.cache', longname)
91 90 else:
92 91 raise error.Abort(_('unknown operating system: %s\n') % os.name)
93 92 raise error.Abort(_('unknown %s usercache location\n') % longname)
94 93
95 94 def inusercache(ui, hash):
96 95 path = usercachepath(ui, hash)
97 96 return os.path.exists(path)
98 97
99 98 def findfile(repo, hash):
100 99 '''Return store path of the largefile with the specified hash.
101 100 As a side effect, the file might be linked from user cache.
102 101 Return None if the file can't be found locally.'''
103 102 path, exists = findstorepath(repo, hash)
104 103 if exists:
105 104 repo.ui.note(_('found %s in store\n') % hash)
106 105 return path
107 106 elif inusercache(repo.ui, hash):
108 107 repo.ui.note(_('found %s in system cache\n') % hash)
109 108 path = storepath(repo, hash)
110 109 link(usercachepath(repo.ui, hash), path)
111 110 return path
112 111 return None
113 112
114 113 class largefilesdirstate(dirstate.dirstate):
115 114 def __getitem__(self, key):
116 115 return super(largefilesdirstate, self).__getitem__(unixpath(key))
117 116 def normal(self, f):
118 117 return super(largefilesdirstate, self).normal(unixpath(f))
119 118 def remove(self, f):
120 119 return super(largefilesdirstate, self).remove(unixpath(f))
121 120 def add(self, f):
122 121 return super(largefilesdirstate, self).add(unixpath(f))
123 122 def drop(self, f):
124 123 return super(largefilesdirstate, self).drop(unixpath(f))
125 124 def forget(self, f):
126 125 return super(largefilesdirstate, self).forget(unixpath(f))
127 126 def normallookup(self, f):
128 127 return super(largefilesdirstate, self).normallookup(unixpath(f))
129 128 def _ignore(self, f):
130 129 return False
131 130 def write(self, tr=False):
132 131 # (1) disable PENDING mode always
133 132 # (lfdirstate isn't yet managed as a part of the transaction)
134 133 # (2) avoid develwarn 'use dirstate.write with ....'
135 134 super(largefilesdirstate, self).write(None)
136 135
137 136 def openlfdirstate(ui, repo, create=True):
138 137 '''
139 138 Return a dirstate object that tracks largefiles: i.e. its root is
140 139 the repo root, but it is saved in .hg/largefiles/dirstate.
141 140 '''
142 141 vfs = repo.vfs
143 142 lfstoredir = longname
144 143 opener = scmutil.opener(vfs.join(lfstoredir))
145 144 lfdirstate = largefilesdirstate(opener, ui, repo.root,
146 145 repo.dirstate._validate)
147 146
148 147 # If the largefiles dirstate does not exist, populate and create
149 148 # it. This ensures that we create it on the first meaningful
150 149 # largefiles operation in a new clone.
151 150 if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
152 151 matcher = getstandinmatcher(repo)
153 152 standins = repo.dirstate.walk(matcher, [], False, False)
154 153
155 154 if len(standins) > 0:
156 155 vfs.makedirs(lfstoredir)
157 156
158 157 for standin in standins:
159 158 lfile = splitstandin(standin)
160 159 lfdirstate.normallookup(lfile)
161 160 return lfdirstate
162 161
163 162 def lfdirstatestatus(lfdirstate, repo):
164 163 wctx = repo['.']
165 164 match = matchmod.always(repo.root, repo.getcwd())
166 165 unsure, s = lfdirstate.status(match, [], False, False, False)
167 166 modified, clean = s.modified, s.clean
168 167 for lfile in unsure:
169 168 try:
170 169 fctx = wctx[standin(lfile)]
171 170 except LookupError:
172 171 fctx = None
173 172 if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
174 173 modified.append(lfile)
175 174 else:
176 175 clean.append(lfile)
177 176 lfdirstate.normal(lfile)
178 177 return s
179 178
180 179 def listlfiles(repo, rev=None, matcher=None):
181 180 '''return a list of largefiles in the working copy or the
182 181 specified changeset'''
183 182
184 183 if matcher is None:
185 184 matcher = getstandinmatcher(repo)
186 185
187 186 # ignore unknown files in working directory
188 187 return [splitstandin(f)
189 188 for f in repo[rev].walk(matcher)
190 189 if rev is not None or repo.dirstate[f] != '?']
191 190
192 191 def instore(repo, hash, forcelocal=False):
193 192 '''Return true if a largefile with the given hash exists in the store'''
194 193 return os.path.exists(storepath(repo, hash, forcelocal))
195 194
196 195 def storepath(repo, hash, forcelocal=False):
197 196 '''Return the correct location in the repository largefiles store for a
198 197 file with the given hash.'''
199 198 if not forcelocal and repo.shared():
200 199 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
201 200 return repo.join(longname, hash)
202 201
203 202 def findstorepath(repo, hash):
204 203 '''Search through the local store path(s) to find the file for the given
205 204 hash. If the file is not found, its path in the primary store is returned.
206 205 The return value is a tuple of (path, exists(path)).
207 206 '''
208 207 # For shared repos, the primary store is in the share source. But for
209 208 # backward compatibility, force a lookup in the local store if it wasn't
210 209 # found in the share source.
211 210 path = storepath(repo, hash, False)
212 211
213 212 if instore(repo, hash):
214 213 return (path, True)
215 214 elif repo.shared() and instore(repo, hash, True):
216 215 return storepath(repo, hash, True), True
217 216
218 217 return (path, False)
219 218
220 219 def copyfromcache(repo, hash, filename):
221 220 '''Copy the specified largefile from the repo or system cache to
222 221 filename in the repository. Return true on success or false if the
223 222 file was not found in either cache (which should not happen:
224 223 this is meant to be called only after ensuring that the needed
225 224 largefile exists in the cache).'''
226 225 wvfs = repo.wvfs
227 226 path = findfile(repo, hash)
228 227 if path is None:
229 228 return False
230 229 wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
231 230 # The write may fail before the file is fully written, but we
232 231 # don't use atomic writes in the working copy.
233 232 with open(path, 'rb') as srcfd:
234 233 with wvfs(filename, 'wb') as destfd:
235 234 gothash = copyandhash(srcfd, destfd)
236 235 if gothash != hash:
237 236 repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
238 237 % (filename, path, gothash))
239 238 wvfs.unlink(filename)
240 239 return False
241 240 return True
242 241
243 242 def copytostore(repo, rev, file, uploaded=False):
244 243 wvfs = repo.wvfs
245 244 hash = readstandin(repo, file, rev)
246 245 if instore(repo, hash):
247 246 return
248 247 if wvfs.exists(file):
249 248 copytostoreabsolute(repo, wvfs.join(file), hash)
250 249 else:
251 250 repo.ui.warn(_("%s: largefile %s not available from local store\n") %
252 251 (file, hash))
253 252
254 253 def copyalltostore(repo, node):
255 254 '''Copy all largefiles in a given revision to the store'''
256 255
257 256 ctx = repo[node]
258 257 for filename in ctx.files():
259 258 if isstandin(filename) and filename in ctx.manifest():
260 259 realfile = splitstandin(filename)
261 260 copytostore(repo, ctx.node(), realfile)
262 261
263
264 262 def copytostoreabsolute(repo, file, hash):
265 263 if inusercache(repo.ui, hash):
266 264 link(usercachepath(repo.ui, hash), storepath(repo, hash))
267 265 else:
268 266 util.makedirs(os.path.dirname(storepath(repo, hash)))
269 267 dst = util.atomictempfile(storepath(repo, hash),
270 268 createmode=repo.store.createmode)
271 269 for chunk in util.filechunkiter(open(file, 'rb')):
272 270 dst.write(chunk)
273 271 dst.close()
274 272 linktousercache(repo, hash)
275 273
276 274 def linktousercache(repo, hash):
277 275 '''Link / copy the largefile with the specified hash from the store
278 276 to the cache.'''
279 277 path = usercachepath(repo.ui, hash)
280 278 link(storepath(repo, hash), path)
281 279
282 280 def getstandinmatcher(repo, rmatcher=None):
283 281 '''Return a match object that applies rmatcher to the standin directory'''
284 282 wvfs = repo.wvfs
285 283 standindir = shortname
286 284
287 285 # no warnings about missing files or directories
288 286 badfn = lambda f, msg: None
289 287
290 288 if rmatcher and not rmatcher.always():
291 289 pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
292 290 if not pats:
293 291 pats = [wvfs.join(standindir)]
294 292 match = scmutil.match(repo[None], pats, badfn=badfn)
295 293 # if pats is empty, it would incorrectly always match, so clear _always
296 294 match._always = False
297 295 else:
298 296 # no patterns: relative to repo root
299 297 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
300 298 return match
301 299
302 300 def composestandinmatcher(repo, rmatcher):
303 301 '''Return a matcher that accepts standins corresponding to the
304 302 files accepted by rmatcher. Pass the list of files in the matcher
305 303 as the paths specified by the user.'''
306 304 smatcher = getstandinmatcher(repo, rmatcher)
307 305 isstandin = smatcher.matchfn
308 306 def composedmatchfn(f):
309 307 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
310 308 smatcher.matchfn = composedmatchfn
311 309
312 310 return smatcher
313 311
314 312 def standin(filename):
315 313 '''Return the repo-relative path to the standin for the specified big
316 314 file.'''
317 315 # Notes:
318 316 # 1) Some callers want an absolute path, but for instance addlargefiles
319 317 # needs it repo-relative so it can be passed to repo[None].add(). So
320 318 # leave it up to the caller to use repo.wjoin() to get an absolute path.
321 319 # 2) Join with '/' because that's what dirstate always uses, even on
322 320 # Windows. Change existing separator to '/' first in case we are
323 321 # passed filenames from an external source (like the command line).
324 322 return shortnameslash + util.pconvert(filename)
325 323
326 324 def isstandin(filename):
327 325 '''Return true if filename is a big file standin. filename must be
328 326 in Mercurial's internal form (slash-separated).'''
329 327 return filename.startswith(shortnameslash)
330 328
331 329 def splitstandin(filename):
332 330 # Split on / because that's what dirstate always uses, even on Windows.
333 331 # Change local separator to / first just in case we are passed filenames
334 332 # from an external source (like the command line).
335 333 bits = util.pconvert(filename).split('/', 1)
336 334 if len(bits) == 2 and bits[0] == shortname:
337 335 return bits[1]
338 336 else:
339 337 return None
340 338
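Taken together, standin(), isstandin() and splitstandin() define a reversible mapping between a largefile's repo-relative path and its standin under '.hglf/'. A minimal, illustrative round trip (assuming the module is imported as lfutil from hgext.largefiles):

    from hgext.largefiles import lfutil

    lfutil.standin('foo/bar.bin')             # '.hglf/foo/bar.bin'
    lfutil.isstandin('.hglf/foo/bar.bin')     # True
    lfutil.splitstandin('.hglf/foo/bar.bin')  # 'foo/bar.bin'
    lfutil.splitstandin('foo/bar.bin')        # None - not under the standin dir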
341 339 def updatestandin(repo, standin):
342 340 file = repo.wjoin(splitstandin(standin))
343 341 if repo.wvfs.exists(splitstandin(standin)):
344 342 hash = hashfile(file)
345 343 executable = getexecutable(file)
346 344 writestandin(repo, standin, hash, executable)
347 345 else:
348 346 raise error.Abort(_('%s: file not found!') % splitstandin(standin))
349 347
350 348 def readstandin(repo, filename, node=None):
351 349 '''read hex hash from standin for filename at given node, or working
352 350 directory if no node is given'''
353 351 return repo[node][standin(filename)].data().strip()
354 352
355 353 def writestandin(repo, standin, hash, executable):
356 354 '''write hash to <repo.root>/<standin>'''
357 355 repo.wwrite(standin, hash + '\n', executable and 'x' or '')
358 356
359 357 def copyandhash(instream, outfile):
360 358 '''Read bytes from instream (iterable) and write them to outfile,
361 359 computing the SHA-1 hash of the data along the way. Return the hash.'''
362 360 hasher = hashlib.sha1('')
363 361 for data in instream:
364 362 hasher.update(data)
365 363 outfile.write(data)
366 364 return hasher.hexdigest()
367 365
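copyandhash() only needs an iterable of byte chunks and a writable file object, so it can be exercised on its own. A purely illustrative sketch (the file name is made up; util.filechunkiter mirrors how callers in this module feed it):

    import io
    from mercurial import util
    from hgext.largefiles import lfutil

    with open('big.bin', 'rb') as src:
        copied = io.BytesIO()
        digest = lfutil.copyandhash(util.filechunkiter(src), copied)
    # digest is the hex SHA-1 of big.bin; copied holds an identical byte stream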
368 366 def hashrepofile(repo, file):
369 367 return hashfile(repo.wjoin(file))
370 368
371 369 def hashfile(file):
372 370 if not os.path.exists(file):
373 371 return ''
374 372 hasher = hashlib.sha1('')
375 373 fd = open(file, 'rb')
376 374 for data in util.filechunkiter(fd, 128 * 1024):
377 375 hasher.update(data)
378 376 fd.close()
379 377 return hasher.hexdigest()
380 378
381 379 def getexecutable(filename):
382 380 mode = os.stat(filename).st_mode
383 381 return ((mode & stat.S_IXUSR) and
384 382 (mode & stat.S_IXGRP) and
385 383 (mode & stat.S_IXOTH))
386 384
387 385 def urljoin(first, second, *arg):
388 386 def join(left, right):
389 387 if not left.endswith('/'):
390 388 left += '/'
391 389 if right.startswith('/'):
392 390 right = right[1:]
393 391 return left + right
394 392
395 393 url = join(first, second)
396 394 for a in arg:
397 395 url = join(url, a)
398 396 return url
399 397
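urljoin() guarantees exactly one slash at each join point, regardless of whether the arguments carry their own. Two illustrative calls (URLs are made up):

    lfutil.urljoin('http://example.com/', '/largefiles', 'abc123')
    # -> 'http://example.com/largefiles/abc123'
    lfutil.urljoin('http://example.com', 'largefiles/', '/abc123')
    # -> 'http://example.com/largefiles/abc123'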
400 398 def hexsha1(data):
401 399 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
402 400 object data"""
403 401 h = hashlib.sha1()
404 402 for chunk in util.filechunkiter(data):
405 403 h.update(chunk)
406 404 return h.hexdigest()
407 405
408 406 def httpsendfile(ui, filename):
409 407 return httpconnection.httpsendfile(ui, filename, 'rb')
410 408
411 409 def unixpath(path):
412 410 '''Return a version of path normalized for use with the lfdirstate.'''
413 411 return util.pconvert(os.path.normpath(path))
414 412
415 413 def islfilesrepo(repo):
416 414 '''Return true if the repo is a largefile repo.'''
417 415 if ('largefiles' in repo.requirements and
418 416 any(shortnameslash in f[0] for f in repo.store.datafiles())):
419 417 return True
420 418
421 419 return any(openlfdirstate(repo.ui, repo, False))
422 420
423 421 class storeprotonotcapable(Exception):
424 422 def __init__(self, storetypes):
425 423 self.storetypes = storetypes
426 424
427 425 def getstandinsstate(repo):
428 426 standins = []
429 427 matcher = getstandinmatcher(repo)
430 428 for standin in repo.dirstate.walk(matcher, [], False, False):
431 429 lfile = splitstandin(standin)
432 430 try:
433 431 hash = readstandin(repo, lfile)
434 432 except IOError:
435 433 hash = None
436 434 standins.append((lfile, hash))
437 435 return standins
438 436
439 437 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
440 438 lfstandin = standin(lfile)
441 439 if lfstandin in repo.dirstate:
442 440 stat = repo.dirstate._map[lfstandin]
443 441 state, mtime = stat[0], stat[3]
444 442 else:
445 443 state, mtime = '?', -1
446 444 if state == 'n':
447 445 if (normallookup or mtime < 0 or
448 446 not repo.wvfs.exists(lfile)):
449 447 # state 'n' doesn't ensure 'clean' in this case
450 448 lfdirstate.normallookup(lfile)
451 449 else:
452 450 lfdirstate.normal(lfile)
453 451 elif state == 'm':
454 452 lfdirstate.normallookup(lfile)
455 453 elif state == 'r':
456 454 lfdirstate.remove(lfile)
457 455 elif state == 'a':
458 456 lfdirstate.add(lfile)
459 457 elif state == '?':
460 458 lfdirstate.drop(lfile)
461 459
462 460 def markcommitted(orig, ctx, node):
463 461 repo = ctx.repo()
464 462
465 463 orig(node)
466 464
467 465 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
468 466 # because files coming from the 2nd parent are omitted in the latter.
469 467 #
470 468 # The former should be used to get targets of "synclfdirstate",
471 469 # because such files:
472 470 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
473 471 # - have to be marked as "n" after commit, but
474 472 # - aren't listed in "repo[node].files()"
475 473
476 474 lfdirstate = openlfdirstate(repo.ui, repo)
477 475 for f in ctx.files():
478 476 if isstandin(f):
479 477 lfile = splitstandin(f)
480 478 synclfdirstate(repo, lfdirstate, lfile, False)
481 479 lfdirstate.write()
482 480
483 481 # As part of committing, copy all of the largefiles into the cache.
484 482 copyalltostore(repo, node)
485 483
486 484 def getlfilestoupdate(oldstandins, newstandins):
487 485 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
488 486 filelist = []
489 487 for f in changedstandins:
490 488 if f[0] not in filelist:
491 489 filelist.append(f[0])
492 490 return filelist
493 491
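getlfilestoupdate() compares two snapshots as returned by getstandinsstate() and reports every largefile whose (name, hash) pair occurs in only one of them. A small worked example with made-up values:

    old = [('a.bin', 'deadbeef'), ('b.bin', '11111111')]
    new = [('a.bin', 'deadbeef'), ('b.bin', '22222222'), ('c.bin', None)]
    lfutil.getlfilestoupdate(old, new)   # ['b.bin', 'c.bin'] (set order, may vary)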
494 492 def getlfilestoupload(repo, missing, addfunc):
495 493 for i, n in enumerate(missing):
496 494 repo.ui.progress(_('finding outgoing largefiles'), i,
497 495 unit=_('revisions'), total=len(missing))
498 496 parents = [p for p in repo[n].parents() if p != node.nullid]
499 497
500 498 oldlfstatus = repo.lfstatus
501 499 repo.lfstatus = False
502 500 try:
503 501 ctx = repo[n]
504 502 finally:
505 503 repo.lfstatus = oldlfstatus
506 504
507 505 files = set(ctx.files())
508 506 if len(parents) == 2:
509 507 mc = ctx.manifest()
510 508 mp1 = ctx.parents()[0].manifest()
511 509 mp2 = ctx.parents()[1].manifest()
512 510 for f in mp1:
513 511 if f not in mc:
514 512 files.add(f)
515 513 for f in mp2:
516 514 if f not in mc:
517 515 files.add(f)
518 516 for f in mc:
519 517 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
520 518 files.add(f)
521 519 for fn in files:
522 520 if isstandin(fn) and fn in ctx:
523 521 addfunc(fn, ctx[fn].data().strip())
524 522 repo.ui.progress(_('finding outgoing largefiles'), None)
525 523
526 524 def updatestandinsbymatch(repo, match):
527 525 '''Update standins in the working directory according to specified match
528 526
529 527 This returns (possibly modified) ``match`` object to be used for
530 528 subsequent commit process.
531 529 '''
532 530
533 531 ui = repo.ui
534 532
535 533 # Case 1: user calls commit with no specific files or
536 534 # include/exclude patterns: refresh and commit all files that
537 535 # are "dirty".
538 536 if match is None or match.always():
539 537 # Spend a bit of time here to get a list of files we know
540 538 # are modified so we can compare only against those.
541 539 # It can cost a lot of time (several seconds)
542 540 # otherwise to update all standins if the largefiles are
543 541 # large.
544 542 lfdirstate = openlfdirstate(ui, repo)
545 543 dirtymatch = matchmod.always(repo.root, repo.getcwd())
546 544 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
547 545 False)
548 546 modifiedfiles = unsure + s.modified + s.added + s.removed
549 547 lfiles = listlfiles(repo)
550 548 # this only loops through largefiles that exist (not
551 549 # removed/renamed)
552 550 for lfile in lfiles:
553 551 if lfile in modifiedfiles:
554 552 if repo.wvfs.exists(standin(lfile)):
555 553 # this handles the case where a rebase is being
556 554 # performed and the working copy is not updated
557 555 # yet.
558 556 if repo.wvfs.exists(lfile):
559 557 updatestandin(repo,
560 558 standin(lfile))
561 559
562 560 return match
563 561
564 562 lfiles = listlfiles(repo)
565 563 match._files = repo._subdirlfs(match.files(), lfiles)
566 564
567 565 # Case 2: user calls commit with specified patterns: refresh
568 566 # any matching big files.
569 567 smatcher = composestandinmatcher(repo, match)
570 568 standins = repo.dirstate.walk(smatcher, [], False, False)
571 569
572 570 # No matching big files: get out of the way and pass control to
573 571 # the usual commit() method.
574 572 if not standins:
575 573 return match
576 574
577 575 # Refresh all matching big files. It's possible that the
578 576 # commit will end up failing, in which case the big files will
579 577 # stay refreshed. No harm done: the user modified them and
580 578 # asked to commit them, so sooner or later we're going to
581 579 # refresh the standins. Might as well leave them refreshed.
582 580 lfdirstate = openlfdirstate(ui, repo)
583 581 for fstandin in standins:
584 582 lfile = splitstandin(fstandin)
585 583 if lfdirstate[lfile] != 'r':
586 584 updatestandin(repo, fstandin)
587 585
588 586 # Cook up a new matcher that only matches regular files or
589 587 # standins corresponding to the big files requested by the
590 588 # user. Have to modify _files to prevent commit() from
591 589 # complaining "not tracked" for big files.
592 590 match = copy.copy(match)
593 591 origmatchfn = match.matchfn
594 592
595 593 # Check both the list of largefiles and the list of
596 594 # standins because if a largefile was removed, it
597 595 # won't be in the list of largefiles at this point
598 596 match._files += sorted(standins)
599 597
600 598 actualfiles = []
601 599 for f in match._files:
602 600 fstandin = standin(f)
603 601
604 602 # For largefiles, only one of the normal and standin should be
605 603 # committed (except if one of them is a remove). In the case of a
606 604 # standin removal, drop the normal file if it is unknown to dirstate.
607 605 # Thus, skip plain largefile names but keep the standin.
608 606 if f in lfiles or fstandin in standins:
609 607 if repo.dirstate[fstandin] != 'r':
610 608 if repo.dirstate[f] != 'r':
611 609 continue
612 610 elif repo.dirstate[f] == '?':
613 611 continue
614 612
615 613 actualfiles.append(f)
616 614 match._files = actualfiles
617 615
618 616 def matchfn(f):
619 617 if origmatchfn(f):
620 618 return f not in lfiles
621 619 else:
622 620 return f in standins
623 621
624 622 match.matchfn = matchfn
625 623
626 624 return match
627 625
628 626 class automatedcommithook(object):
629 627 '''Stateful hook to update standins at the 1st commit of resuming
630 628
631 629 For efficiency, updating standins in the working directory should
632 630 be avoided while automated committing (like rebase, transplant and
633 631 so on), because they should be updated before committing.
634 632
635 633 But the 1st commit of resuming automated committing (e.g. ``rebase
636 634 --continue``) should update them, because largefiles may be
637 635 modified manually.
638 636 '''
639 637 def __init__(self, resuming):
640 638 self.resuming = resuming
641 639
642 640 def __call__(self, repo, match):
643 641 if self.resuming:
644 642 self.resuming = False # avoids updating at subsequent commits
645 643 return updatestandinsbymatch(repo, match)
646 644 else:
647 645 return match
648 646
649 647 def getstatuswriter(ui, repo, forcibly=None):
650 648 '''Return the function to write largefiles specific status out
651 649
652 650 If ``forcibly`` is ``None``, this returns the last element of
653 651 ``repo._lfstatuswriters`` as "default" writer function.
654 652
655 653 Otherwise, this returns the function to always write out (or
656 654 ignore if ``not forcibly``) status.
657 655 '''
658 656 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
659 657 return repo._lfstatuswriters[-1]
660 658 else:
661 659 if forcibly:
662 660 return ui.status # forcibly WRITE OUT
663 661 else:
664 662 return lambda *msg, **opts: None # forcibly IGNORE
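In short, getstatuswriter() hands back the repository's currently registered writer by default, while an explicit ``forcibly`` short-circuits to "always print" or "always ignore". An illustrative use, assuming a ui object and a largefiles-enabled repo are in scope:

    writer = lfutil.getstatuswriter(ui, repo)  # repo._lfstatuswriters[-1]
    lfutil.getstatuswriter(ui, repo, forcibly=True)('3 largefiles updated\n')   # printed
    lfutil.getstatuswriter(ui, repo, forcibly=False)('3 largefiles updated\n')  # ignored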
@@ -1,1433 +1,1432
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import os
14 14
15 15 from mercurial.i18n import _
16 16
17 17 from mercurial import (
18 18 archival,
19 19 cmdutil,
20 20 error,
21 21 hg,
22 22 match as matchmod,
23 23 pathutil,
24 24 registrar,
25 25 revset,
26 26 scmutil,
27 27 util,
28 28 )
29 29
30 30 from . import (
31 31 lfcommands,
32 32 lfutil,
33 33 storefactory,
34 34 )
35 35
36 36 # -- Utility functions: commonly/repeatedly needed functionality ---------------
37 37
38 38 def composelargefilematcher(match, manifest):
39 39 '''create a matcher that matches only the largefiles in the original
40 40 matcher'''
41 41 m = copy.copy(match)
42 42 lfile = lambda f: lfutil.standin(f) in manifest
43 43 m._files = filter(lfile, m._files)
44 44 m._fileroots = set(m._files)
45 45 m._always = False
46 46 origmatchfn = m.matchfn
47 47 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
48 48 return m
49 49
50 50 def composenormalfilematcher(match, manifest, exclude=None):
51 51 excluded = set()
52 52 if exclude is not None:
53 53 excluded.update(exclude)
54 54
55 55 m = copy.copy(match)
56 56 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
57 57 manifest or f in excluded)
58 58 m._files = filter(notlfile, m._files)
59 59 m._fileroots = set(m._files)
60 60 m._always = False
61 61 origmatchfn = m.matchfn
62 62 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
63 63 return m
64 64
65 65 def installnormalfilesmatchfn(manifest):
66 66 '''installmatchfn with a matchfn that ignores all largefiles'''
67 67 def overridematch(ctx, pats=(), opts=None, globbed=False,
68 68 default='relpath', badfn=None):
69 69 if opts is None:
70 70 opts = {}
71 71 match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
72 72 return composenormalfilematcher(match, manifest)
73 73 oldmatch = installmatchfn(overridematch)
74 74
75 75 def installmatchfn(f):
76 76 '''monkey patch the scmutil module with a custom match function.
77 77 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
78 78 oldmatch = scmutil.match
79 79 setattr(f, 'oldmatch', oldmatch)
80 80 scmutil.match = f
81 81 return oldmatch
82 82
83 83 def restorematchfn():
84 84 '''restores scmutil.match to what it was before installmatchfn
85 85 was called. no-op if scmutil.match is its original function.
86 86
87 87 Note that n calls to installmatchfn will require n calls to
88 88 restore the original matchfn.'''
89 89 scmutil.match = getattr(scmutil.match, 'oldmatch')
90 90
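installmatchfn() and restorematchfn() act as a small stack around scmutil.match: each install stashes the previous function on the wrapper as 'oldmatch', and each restore puts it back. An illustrative pairing (the wrapper itself is hypothetical):

    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        # delegate to whatever scmutil.match was before installation
        return overridematch.oldmatch(ctx, pats, opts, globbed, default,
                                      badfn=badfn)

    installmatchfn(overridematch)   # scmutil.match is now overridematch
    try:
        pass                        # ... code that calls scmutil.match ...
    finally:
        restorematchfn()            # previous scmutil.match is back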
91 91 def installmatchandpatsfn(f):
92 92 oldmatchandpats = scmutil.matchandpats
93 93 setattr(f, 'oldmatchandpats', oldmatchandpats)
94 94 scmutil.matchandpats = f
95 95 return oldmatchandpats
96 96
97 97 def restorematchandpatsfn():
98 98 '''restores scmutil.matchandpats to what it was before
99 99 installmatchandpatsfn was called. No-op if scmutil.matchandpats
100 100 is its original function.
101 101
102 102 Note that n calls to installmatchandpatsfn will require n calls
103 103 to restore the original matchfn.'''
104 104 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
105 105 scmutil.matchandpats)
106 106
107 107 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
108 108 large = opts.get('large')
109 109 lfsize = lfutil.getminsize(
110 110 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
111 111
112 112 lfmatcher = None
113 113 if lfutil.islfilesrepo(repo):
114 114 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
115 115 if lfpats:
116 116 lfmatcher = matchmod.match(repo.root, '', list(lfpats))
117 117
118 118 lfnames = []
119 119 m = matcher
120 120
121 121 wctx = repo[None]
122 122 for f in repo.walk(matchmod.badmatch(m, lambda x, y: None)):
123 123 exact = m.exact(f)
124 124 lfile = lfutil.standin(f) in wctx
125 125 nfile = f in wctx
126 126 exists = lfile or nfile
127 127
128 128 # addremove in core gets fancy with the name, add doesn't
129 129 if isaddremove:
130 130 name = m.uipath(f)
131 131 else:
132 132 name = m.rel(f)
133 133
134 134 # Don't warn the user when they attempt to add a normal tracked file.
135 135 # The normal add code will do that for us.
136 136 if exact and exists:
137 137 if lfile:
138 138 ui.warn(_('%s already a largefile\n') % name)
139 139 continue
140 140
141 141 if (exact or not exists) and not lfutil.isstandin(f):
142 142 # In case the file was removed previously, but not committed
143 143 # (issue3507)
144 144 if not repo.wvfs.exists(f):
145 145 continue
146 146
147 147 abovemin = (lfsize and
148 148 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
149 149 if large or abovemin or (lfmatcher and lfmatcher(f)):
150 150 lfnames.append(f)
151 151 if ui.verbose or not exact:
152 152 ui.status(_('adding %s as a largefile\n') % name)
153 153
154 154 bad = []
155 155
156 156 # Need to lock, otherwise there could be a race condition between
157 157 # when standins are created and added to the repo.
158 158 with repo.wlock():
159 159 if not opts.get('dry_run'):
160 160 standins = []
161 161 lfdirstate = lfutil.openlfdirstate(ui, repo)
162 162 for f in lfnames:
163 163 standinname = lfutil.standin(f)
164 164 lfutil.writestandin(repo, standinname, hash='',
165 165 executable=lfutil.getexecutable(repo.wjoin(f)))
166 166 standins.append(standinname)
167 167 if lfdirstate[f] == 'r':
168 168 lfdirstate.normallookup(f)
169 169 else:
170 170 lfdirstate.add(f)
171 171 lfdirstate.write()
172 172 bad += [lfutil.splitstandin(f)
173 173 for f in repo[None].add(standins)
174 174 if f in m.files()]
175 175
176 176 added = [f for f in lfnames if f not in bad]
177 177 return added, bad
178 178
179 179 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
180 180 after = opts.get('after')
181 181 m = composelargefilematcher(matcher, repo[None].manifest())
182 182 try:
183 183 repo.lfstatus = True
184 184 s = repo.status(match=m, clean=not isaddremove)
185 185 finally:
186 186 repo.lfstatus = False
187 187 manifest = repo[None].manifest()
188 188 modified, added, deleted, clean = [[f for f in list
189 189 if lfutil.standin(f) in manifest]
190 190 for list in (s.modified, s.added,
191 191 s.deleted, s.clean)]
192 192
193 193 def warn(files, msg):
194 194 for f in files:
195 195 ui.warn(msg % m.rel(f))
196 196 return int(len(files) > 0)
197 197
198 198 result = 0
199 199
200 200 if after:
201 201 remove = deleted
202 202 result = warn(modified + added + clean,
203 203 _('not removing %s: file still exists\n'))
204 204 else:
205 205 remove = deleted + clean
206 206 result = warn(modified, _('not removing %s: file is modified (use -f'
207 207 ' to force removal)\n'))
208 208 result = warn(added, _('not removing %s: file has been marked for add'
209 209 ' (use forget to undo)\n')) or result
210 210
211 211 # Need to lock because standin files are deleted then removed from the
212 212 # repository and we could race in-between.
213 213 with repo.wlock():
214 214 lfdirstate = lfutil.openlfdirstate(ui, repo)
215 215 for f in sorted(remove):
216 216 if ui.verbose or not m.exact(f):
217 217 # addremove in core gets fancy with the name, remove doesn't
218 218 if isaddremove:
219 219 name = m.uipath(f)
220 220 else:
221 221 name = m.rel(f)
222 222 ui.status(_('removing %s\n') % name)
223 223
224 224 if not opts.get('dry_run'):
225 225 if not after:
226 226 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
227 227
228 228 if opts.get('dry_run'):
229 229 return result
230 230
231 231 remove = [lfutil.standin(f) for f in remove]
232 232 # If this is being called by addremove, let the original addremove
233 233 # function handle this.
234 234 if not isaddremove:
235 235 for f in remove:
236 236 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
237 237 repo[None].forget(remove)
238 238
239 239 for f in remove:
240 240 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
241 241 False)
242 242
243 243 lfdirstate.write()
244 244
245 245 return result
246 246
247 247 # For overriding mercurial.hgweb.webcommands so that largefiles will
248 248 # appear at their right place in the manifests.
249 249 def decodepath(orig, path):
250 250 return lfutil.splitstandin(path) or path
251 251
252 252 # -- Wrappers: modify existing commands --------------------------------
253 253
254 254 def overrideadd(orig, ui, repo, *pats, **opts):
255 255 if opts.get('normal') and opts.get('large'):
256 256 raise error.Abort(_('--normal cannot be used with --large'))
257 257 return orig(ui, repo, *pats, **opts)
258 258
259 259 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
260 260 # The --normal flag short circuits this override
261 261 if opts.get('normal'):
262 262 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
263 263
264 264 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
265 265 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
266 266 ladded)
267 267 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
268 268
269 269 bad.extend(f for f in lbad)
270 270 return bad
271 271
272 272 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
273 273 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
274 274 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
275 275 return removelargefiles(ui, repo, False, matcher, after=after,
276 276 force=force) or result
277 277
278 278 def overridestatusfn(orig, repo, rev2, **opts):
279 279 try:
280 280 repo._repo.lfstatus = True
281 281 return orig(repo, rev2, **opts)
282 282 finally:
283 283 repo._repo.lfstatus = False
284 284
285 285 def overridestatus(orig, ui, repo, *pats, **opts):
286 286 try:
287 287 repo.lfstatus = True
288 288 return orig(ui, repo, *pats, **opts)
289 289 finally:
290 290 repo.lfstatus = False
291 291
292 292 def overridedirty(orig, repo, ignoreupdate=False):
293 293 try:
294 294 repo._repo.lfstatus = True
295 295 return orig(repo, ignoreupdate)
296 296 finally:
297 297 repo._repo.lfstatus = False
298 298
299 299 def overridelog(orig, ui, repo, *pats, **opts):
300 300 def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
301 301 default='relpath', badfn=None):
302 302 """Matcher that merges root directory with .hglf, suitable for log.
303 303 It is still possible to match .hglf directly.
304 304 For any listed files run log on the standin too.
305 305 matchfn tries both the given filename and with .hglf stripped.
306 306 """
307 307 if opts is None:
308 308 opts = {}
309 309 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
310 310 badfn=badfn)
311 311 m, p = copy.copy(matchandpats)
312 312
313 313 if m.always():
314 314 # We want to match everything anyway, so there's no benefit trying
315 315 # to add standins.
316 316 return matchandpats
317 317
318 318 pats = set(p)
319 319
320 320 def fixpats(pat, tostandin=lfutil.standin):
321 321 if pat.startswith('set:'):
322 322 return pat
323 323
324 324 kindpat = matchmod._patsplit(pat, None)
325 325
326 326 if kindpat[0] is not None:
327 327 return kindpat[0] + ':' + tostandin(kindpat[1])
328 328 return tostandin(kindpat[1])
329 329
330 330 if m._cwd:
331 331 hglf = lfutil.shortname
332 332 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
333 333
334 334 def tostandin(f):
335 335 # The file may already be a standin, so truncate the back
336 336 # prefix and test before mangling it. This avoids turning
337 337 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
338 338 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
339 339 return f
340 340
341 341 # An absolute path is from outside the repo, so truncate the
342 342 # path to the root before building the standin. Otherwise cwd
343 343 # is somewhere in the repo, relative to root, and needs to be
344 344 # prepended before building the standin.
345 345 if os.path.isabs(m._cwd):
346 346 f = f[len(back):]
347 347 else:
348 348 f = m._cwd + '/' + f
349 349 return back + lfutil.standin(f)
350 350
351 351 pats.update(fixpats(f, tostandin) for f in p)
352 352 else:
353 353 def tostandin(f):
354 354 if lfutil.splitstandin(f):
355 355 return f
356 356 return lfutil.standin(f)
357 357 pats.update(fixpats(f, tostandin) for f in p)
358 358
359 359 for i in range(0, len(m._files)):
360 360 # Don't add '.hglf' to m.files, since that is already covered by '.'
361 361 if m._files[i] == '.':
362 362 continue
363 363 standin = lfutil.standin(m._files[i])
364 364 # If the "standin" is a directory, append instead of replace to
365 365 # support naming a directory on the command line with only
366 366 # largefiles. The original directory is kept to support normal
367 367 # files.
368 368 if standin in repo[ctx.node()]:
369 369 m._files[i] = standin
370 370 elif m._files[i] not in repo[ctx.node()] \
371 371 and repo.wvfs.isdir(standin):
372 372 m._files.append(standin)
373 373
374 374 m._fileroots = set(m._files)
375 375 m._always = False
376 376 origmatchfn = m.matchfn
377 377 def lfmatchfn(f):
378 378 lf = lfutil.splitstandin(f)
379 379 if lf is not None and origmatchfn(lf):
380 380 return True
381 381 r = origmatchfn(f)
382 382 return r
383 383 m.matchfn = lfmatchfn
384 384
385 385 ui.debug('updated patterns: %s\n' % sorted(pats))
386 386 return m, pats
387 387
388 388 # For hg log --patch, the match object is used in two different senses:
389 389 # (1) to determine what revisions should be printed out, and
390 390 # (2) to determine what files to print out diffs for.
391 391 # The magic matchandpats override should be used for case (1) but not for
392 392 # case (2).
393 393 def overridemakelogfilematcher(repo, pats, opts, badfn=None):
394 394 wctx = repo[None]
395 395 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
396 396 return lambda rev: match
397 397
398 398 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
399 399 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
400 400 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
401 401
402 402 try:
403 403 return orig(ui, repo, *pats, **opts)
404 404 finally:
405 405 restorematchandpatsfn()
406 406 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
407 407
408 408 def overrideverify(orig, ui, repo, *pats, **opts):
409 409 large = opts.pop('large', False)
410 410 all = opts.pop('lfa', False)
411 411 contents = opts.pop('lfc', False)
412 412
413 413 result = orig(ui, repo, *pats, **opts)
414 414 if large or all or contents:
415 415 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
416 416 return result
417 417
418 418 def overridedebugstate(orig, ui, repo, *pats, **opts):
419 419 large = opts.pop('large', False)
420 420 if large:
421 421 class fakerepo(object):
422 422 dirstate = lfutil.openlfdirstate(ui, repo)
423 423 orig(ui, fakerepo, *pats, **opts)
424 424 else:
425 425 orig(ui, repo, *pats, **opts)
426 426
427 427 # Before starting the manifest merge, merge.updates will call
428 428 # _checkunknownfile to check if there are any files in the merged-in
429 429 # changeset that collide with unknown files in the working copy.
430 430 #
431 431 # The largefiles are seen as unknown, so this prevents us from merging
432 432 # in a file 'foo' if we already have a largefile with the same name.
433 433 #
434 434 # The overridden function filters the unknown files by removing any
435 435 # largefiles. This makes the merge proceed and we can then handle this
436 436 # case further in the overridden calculateupdates function below.
437 437 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
438 438 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
439 439 return False
440 440 return origfn(repo, wctx, mctx, f, f2)
441 441
442 442 # The manifest merge handles conflicts on the manifest level. We want
443 443 # to handle changes in largefile-ness of files at this level too.
444 444 #
445 445 # The strategy is to run the original calculateupdates and then process
446 446 # the action list it outputs. There are two cases we need to deal with:
447 447 #
448 448 # 1. Normal file in p1, largefile in p2. Here the largefile is
449 449 # detected via its standin file, which will enter the working copy
450 450 # with a "get" action. It is not "merge" since the standin is all
451 451 # Mercurial is concerned with at this level -- the link to the
452 452 # existing normal file is not relevant here.
453 453 #
454 454 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
455 455 # since the largefile will be present in the working copy and
456 456 # different from the normal file in p2. Mercurial therefore
457 457 # triggers a merge action.
458 458 #
459 459 # In both cases, we prompt the user and emit new actions to either
460 460 # remove the standin (if the normal file was kept) or to remove the
461 461 # normal file and get the standin (if the largefile was kept). The
462 462 # default prompt answer is to use the largefile version since it was
463 463 # presumably changed on purpose.
464 464 #
465 465 # Finally, the merge.applyupdates function will then take care of
466 466 # writing the files into the working copy and lfcommands.updatelfiles
467 467 # will update the largefiles.
468 468 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
469 469 acceptremote, *args, **kwargs):
470 470 overwrite = force and not branchmerge
471 471 actions, diverge, renamedelete = origfn(
472 472 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)
473 473
474 474 if overwrite:
475 475 return actions, diverge, renamedelete
476 476
477 477 # Convert to dictionary with filename as key and action as value.
478 478 lfiles = set()
479 479 for f in actions:
480 480 splitstandin = lfutil.splitstandin(f)
481 481 if splitstandin in p1:
482 482 lfiles.add(splitstandin)
483 483 elif lfutil.standin(f) in p1:
484 484 lfiles.add(f)
485 485
486 486 for lfile in sorted(lfiles):
487 487 standin = lfutil.standin(lfile)
488 488 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
489 489 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
490 490 if sm in ('g', 'dc') and lm != 'r':
491 491 if sm == 'dc':
492 492 f1, f2, fa, move, anc = sargs
493 493 sargs = (p2[f2].flags(), False)
494 494 # Case 1: normal file in the working copy, largefile in
495 495 # the second parent
496 496 usermsg = _('remote turned local normal file %s into a largefile\n'
497 497 'use (l)argefile or keep (n)ormal file?'
498 498 '$$ &Largefile $$ &Normal file') % lfile
499 499 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
500 500 actions[lfile] = ('r', None, 'replaced by standin')
501 501 actions[standin] = ('g', sargs, 'replaces standin')
502 502 else: # keep local normal file
503 503 actions[lfile] = ('k', None, 'replaces standin')
504 504 if branchmerge:
505 505 actions[standin] = ('k', None, 'replaced by non-standin')
506 506 else:
507 507 actions[standin] = ('r', None, 'replaced by non-standin')
508 508 elif lm in ('g', 'dc') and sm != 'r':
509 509 if lm == 'dc':
510 510 f1, f2, fa, move, anc = largs
511 511 largs = (p2[f2].flags(), False)
512 512 # Case 2: largefile in the working copy, normal file in
513 513 # the second parent
514 514 usermsg = _('remote turned local largefile %s into a normal file\n'
515 515 'keep (l)argefile or use (n)ormal file?'
516 516 '$$ &Largefile $$ &Normal file') % lfile
517 517 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
518 518 if branchmerge:
519 519 # largefile can be restored from standin safely
520 520 actions[lfile] = ('k', None, 'replaced by standin')
521 521 actions[standin] = ('k', None, 'replaces standin')
522 522 else:
523 523 # "lfile" should be marked as "removed" without
524 524 # removal of itself
525 525 actions[lfile] = ('lfmr', None,
526 526 'forget non-standin largefile')
527 527
528 528 # linear-merge should treat this largefile as 're-added'
529 529 actions[standin] = ('a', None, 'keep standin')
530 530 else: # pick remote normal file
531 531 actions[lfile] = ('g', largs, 'replaces standin')
532 532 actions[standin] = ('r', None, 'replaced by non-standin')
533 533
534 534 return actions, diverge, renamedelete
535 535
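The rewriting above operates purely on the merge action dictionary. For case 1 (normal file in the working copy, largefile in the second parent) the effect is roughly the following, with a made-up filename:

    # before:  actions['.hglf/foo'] == ('g', sargs, ...)   # standin arrives via "get"
    # after the user answers (l)argefile:
    #     actions['foo']       == ('r', None, 'replaced by standin')
    #     actions['.hglf/foo'] == ('g', sargs, 'replaces standin')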
536 536 def mergerecordupdates(orig, repo, actions, branchmerge):
537 537 if 'lfmr' in actions:
538 538 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
539 539 for lfile, args, msg in actions['lfmr']:
540 540 # this should be executed before 'orig', to execute 'remove'
541 541 # before all other actions
542 542 repo.dirstate.remove(lfile)
543 543 # make sure lfile doesn't get synclfdirstate'd as normal
544 544 lfdirstate.add(lfile)
545 545 lfdirstate.write()
546 546
547 547 return orig(repo, actions, branchmerge)
548 548
549
550 549 # Override filemerge to prompt the user about how they wish to merge
551 550 # largefiles. This will handle identical edits without prompting the user.
552 551 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
553 552 labels=None):
554 553 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
555 554 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
556 555 labels=labels)
557 556
558 557 ahash = fca.data().strip().lower()
559 558 dhash = fcd.data().strip().lower()
560 559 ohash = fco.data().strip().lower()
561 560 if (ohash != ahash and
562 561 ohash != dhash and
563 562 (dhash == ahash or
564 563 repo.ui.promptchoice(
565 564 _('largefile %s has a merge conflict\nancestor was %s\n'
566 565 'keep (l)ocal %s or\ntake (o)ther %s?'
567 566 '$$ &Local $$ &Other') %
568 567 (lfutil.splitstandin(orig), ahash, dhash, ohash),
569 568 0) == 1)):
570 569 repo.wwrite(fcd.path(), fco.data(), fco.flags())
571 570 return True, 0, False
572 571
573 572 def copiespathcopies(orig, ctx1, ctx2, match=None):
574 573 copies = orig(ctx1, ctx2, match=match)
575 574 updated = {}
576 575
577 576 for k, v in copies.iteritems():
578 577 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
579 578
580 579 return updated
581 580
582 581 # Copy first changes the matchers to match standins instead of
583 582 # largefiles. Then it overrides util.copyfile in that function it
584 583 # checks if the destination largefile already exists. It also keeps a
585 584 # list of copied files so that the largefiles can be copied and the
586 585 # dirstate updated.
587 586 def overridecopy(orig, ui, repo, pats, opts, rename=False):
588 587 # doesn't remove largefile on rename
589 588 if len(pats) < 2:
590 589 # this isn't legal, let the original function deal with it
591 590 return orig(ui, repo, pats, opts, rename)
592 591
593 592 # This could copy both lfiles and normal files in one command,
594 593 # but we don't want to do that. First replace their matcher to
595 594 # only match normal files and run it, then replace it to just
596 595 # match largefiles and run it again.
597 596 nonormalfiles = False
598 597 nolfiles = False
599 598 installnormalfilesmatchfn(repo[None].manifest())
600 599 try:
601 600 result = orig(ui, repo, pats, opts, rename)
602 601 except error.Abort as e:
603 602 if str(e) != _('no files to copy'):
604 603 raise e
605 604 else:
606 605 nonormalfiles = True
607 606 result = 0
608 607 finally:
609 608 restorematchfn()
610 609
611 610 # The first rename can cause our current working directory to be removed.
612 611 # In that case there is nothing left to copy/rename so just quit.
613 612 try:
614 613 repo.getcwd()
615 614 except OSError:
616 615 return result
617 616
618 617 def makestandin(relpath):
619 618 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
620 619 return repo.wvfs.join(lfutil.standin(path))
621 620
622 621 fullpats = scmutil.expandpats(pats)
623 622 dest = fullpats[-1]
624 623
625 624 if os.path.isdir(dest):
626 625 if not os.path.isdir(makestandin(dest)):
627 626 os.makedirs(makestandin(dest))
628 627
629 628 try:
630 629 # When we call orig below it creates the standins but we don't add
631 630 # them to the dir state until later so lock during that time.
632 631 wlock = repo.wlock()
633 632
634 633 manifest = repo[None].manifest()
635 634 def overridematch(ctx, pats=(), opts=None, globbed=False,
636 635 default='relpath', badfn=None):
637 636 if opts is None:
638 637 opts = {}
639 638 newpats = []
640 639 # The patterns were previously mangled to add the standin
641 640 # directory; we need to remove that now
642 641 for pat in pats:
643 642 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
644 643 newpats.append(pat.replace(lfutil.shortname, ''))
645 644 else:
646 645 newpats.append(pat)
647 646 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
648 647 m = copy.copy(match)
649 648 lfile = lambda f: lfutil.standin(f) in manifest
650 649 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
651 650 m._fileroots = set(m._files)
652 651 origmatchfn = m.matchfn
653 652 m.matchfn = lambda f: (lfutil.isstandin(f) and
654 653 (f in manifest) and
655 654 origmatchfn(lfutil.splitstandin(f)) or
656 655 None)
657 656 return m
658 657 oldmatch = installmatchfn(overridematch)
659 658 listpats = []
660 659 for pat in pats:
661 660 if matchmod.patkind(pat) is not None:
662 661 listpats.append(pat)
663 662 else:
664 663 listpats.append(makestandin(pat))
665 664
666 665 try:
667 666 origcopyfile = util.copyfile
668 667 copiedfiles = []
669 668 def overridecopyfile(src, dest):
670 669 if (lfutil.shortname in src and
671 670 dest.startswith(repo.wjoin(lfutil.shortname))):
672 671 destlfile = dest.replace(lfutil.shortname, '')
673 672 if not opts['force'] and os.path.exists(destlfile):
674 673 raise IOError('',
675 674 _('destination largefile already exists'))
676 675 copiedfiles.append((src, dest))
677 676 origcopyfile(src, dest)
678 677
679 678 util.copyfile = overridecopyfile
680 679 result += orig(ui, repo, listpats, opts, rename)
681 680 finally:
682 681 util.copyfile = origcopyfile
683 682
684 683 lfdirstate = lfutil.openlfdirstate(ui, repo)
685 684 for (src, dest) in copiedfiles:
686 685 if (lfutil.shortname in src and
687 686 dest.startswith(repo.wjoin(lfutil.shortname))):
688 687 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
689 688 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
690 689 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
691 690 if not os.path.isdir(destlfiledir):
692 691 os.makedirs(destlfiledir)
693 692 if rename:
694 693 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
695 694
696 695 # The file is gone, but this deletes any empty parent
697 696 # directories as a side-effect.
698 697 util.unlinkpath(repo.wjoin(srclfile), True)
699 698 lfdirstate.remove(srclfile)
700 699 else:
701 700 util.copyfile(repo.wjoin(srclfile),
702 701 repo.wjoin(destlfile))
703 702
704 703 lfdirstate.add(destlfile)
705 704 lfdirstate.write()
706 705 except error.Abort as e:
707 706 if str(e) != _('no files to copy'):
708 707 raise e
709 708 else:
710 709 nolfiles = True
711 710 finally:
712 711 restorematchfn()
713 712 wlock.release()
714 713
715 714 if nolfiles and nonormalfiles:
716 715 raise error.Abort(_('no files to copy'))
717 716
718 717 return result
719 718
720 719 # When the user calls revert, we have to be careful to not revert any
721 720 # changes to other largefiles accidentally. This means we have to keep
722 721 # track of the largefiles that are being reverted so we only pull down
723 722 # the necessary largefiles.
724 723 #
725 724 # Standins are only updated (to match the hash of largefiles) before
726 725 # commits. Update the standins then run the original revert, changing
727 726 # the matcher to hit standins instead of largefiles. Based on the
728 727 # resulting standins update the largefiles.
729 728 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
730 729 # Because we put the standins in a bad state (by updating them)
731 730 # and then return them to a correct state we need to lock to
732 731 # prevent others from changing them in their incorrect state.
733 732 with repo.wlock():
734 733 lfdirstate = lfutil.openlfdirstate(ui, repo)
735 734 s = lfutil.lfdirstatestatus(lfdirstate, repo)
736 735 lfdirstate.write()
737 736 for lfile in s.modified:
738 737 lfutil.updatestandin(repo, lfutil.standin(lfile))
739 738 for lfile in s.deleted:
740 739 if (repo.wvfs.exists(lfutil.standin(lfile))):
741 740 repo.wvfs.unlink(lfutil.standin(lfile))
742 741
743 742 oldstandins = lfutil.getstandinsstate(repo)
744 743
745 744 def overridematch(mctx, pats=(), opts=None, globbed=False,
746 745 default='relpath', badfn=None):
747 746 if opts is None:
748 747 opts = {}
749 748 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
750 749 m = copy.copy(match)
751 750
752 751 # revert supports recursing into subrepos, and though largefiles
753 752 # currently doesn't work correctly in that case, this match is
754 753 # called, so the lfdirstate above may not be the correct one for
755 754 # this invocation of match.
756 755 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
757 756 False)
758 757
759 758 def tostandin(f):
760 759 standin = lfutil.standin(f)
761 760 if standin in ctx or standin in mctx:
762 761 return standin
763 762 elif standin in repo[None] or lfdirstate[f] == 'r':
764 763 return None
765 764 return f
766 765 m._files = [tostandin(f) for f in m._files]
767 766 m._files = [f for f in m._files if f is not None]
768 767 m._fileroots = set(m._files)
769 768 origmatchfn = m.matchfn
770 769 def matchfn(f):
771 770 if lfutil.isstandin(f):
772 771 return (origmatchfn(lfutil.splitstandin(f)) and
773 772 (f in ctx or f in mctx))
774 773 return origmatchfn(f)
775 774 m.matchfn = matchfn
776 775 return m
777 776 oldmatch = installmatchfn(overridematch)
778 777 try:
779 778 orig(ui, repo, ctx, parents, *pats, **opts)
780 779 finally:
781 780 restorematchfn()
782 781
783 782 newstandins = lfutil.getstandinsstate(repo)
784 783 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
785 784 # lfdirstate should be 'normallookup'-ed for updated files,
786 785 # because reverting doesn't touch dirstate for 'normal' files
787 786 # when target revision is explicitly specified: in such case,
788 787 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
789 788 # of target (standin) file.
790 789 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
791 790 normallookup=True)
792 791
793 792 # after pulling changesets, we need to take some extra care to get
794 793 # largefiles updated remotely
795 794 def overridepull(orig, ui, repo, source=None, **opts):
796 795 revsprepull = len(repo)
797 796 if not source:
798 797 source = 'default'
799 798 repo.lfpullsource = source
800 799 result = orig(ui, repo, source, **opts)
801 800 revspostpull = len(repo)
802 801 lfrevs = opts.get('lfrev', [])
803 802 if opts.get('all_largefiles'):
804 803 lfrevs.append('pulled()')
805 804 if lfrevs and revspostpull > revsprepull:
806 805 numcached = 0
807 806 repo.firstpulled = revsprepull # for pulled() revset expression
808 807 try:
809 808 for rev in scmutil.revrange(repo, lfrevs):
810 809 ui.note(_('pulling largefiles for revision %s\n') % rev)
811 810 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
812 811 numcached += len(cached)
813 812 finally:
814 813 del repo.firstpulled
815 814 ui.status(_("%d largefiles cached\n") % numcached)
816 815 return result
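
# Illustrative usage of the pull override above (a sketch, not part of the
# original module): once the extension wires this in, largefiles for freshly
# pulled changesets can be cached eagerly, e.g.
#
#   hg pull --lfrev "pulled()"      # cache largefiles for every new changeset
#   hg pull --all-largefiles        # shorthand that appends 'pulled()' above
#
# Both forms rely on the pulled() revset defined further down.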
817 816
818 817 def overridepush(orig, ui, repo, *args, **kwargs):
819 818 """Override push command and store --lfrev parameters in opargs"""
820 819 lfrevs = kwargs.pop('lfrev', None)
821 820 if lfrevs:
822 821 opargs = kwargs.setdefault('opargs', {})
823 822 opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
824 823 return orig(ui, repo, *args, **kwargs)
825 824
826 825 def exchangepushoperation(orig, *args, **kwargs):
827 826 """Override pushoperation constructor and store lfrevs parameter"""
828 827 lfrevs = kwargs.pop('lfrevs', None)
829 828 pushop = orig(*args, **kwargs)
830 829 pushop.lfrevs = lfrevs
831 830 return pushop
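
# A minimal sketch of how overrides like overridepush and
# exchangepushoperation are typically wired in (the real wiring lives in the
# extension's uisetup, not here, and may differ in detail):
#
#   from mercurial import commands, exchange, extensions
#   entry = extensions.wrapcommand(commands.table, 'push', overridepush)
#   entry[1].append(('', 'lfrev', [],
#                    'upload largefiles for these revisions', 'REV'))
#   extensions.wrapfunction(exchange, 'pushoperation', exchangepushoperation)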
832 831
833 832 revsetpredicate = registrar.revsetpredicate()
834 833
835 834 @revsetpredicate('pulled()')
836 835 def pulledrevsetsymbol(repo, subset, x):
837 836 """Changesets that have just been pulled.
838 837
839 838 Only available with largefiles from pull --lfrev expressions.
840 839
841 840 .. container:: verbose
842 841
843 842 Some examples:
844 843
845 844 - pull largefiles for all new changesets::
846 845
847 846 hg pull --lfrev "pulled()"
848 847
849 848 - pull largefiles for all new branch heads::
850 849
851 850 hg pull --lfrev "head(pulled()) and not closed()"
852 851
853 852 """
854 853
855 854 try:
856 855 firstpulled = repo.firstpulled
857 856 except AttributeError:
858 857 raise error.Abort(_("pulled() only available in --lfrev"))
859 858 return revset.baseset([r for r in subset if r >= firstpulled])
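
# Worked example (hypothetical numbers): if the repository had 10 revisions
# (0..9) before the pull, repo.firstpulled is set to 10 by overridepull, and
# a pull that adds three changesets makes "pulled()" evaluate to the baseset
# [10, 11, 12] while the pull is still in progress.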
860 859
861 860 def overrideclone(orig, ui, source, dest=None, **opts):
862 861 d = dest
863 862 if d is None:
864 863 d = hg.defaultdest(source)
865 864 if opts.get('all_largefiles') and not hg.islocal(d):
866 865 raise error.Abort(_(
867 866 '--all-largefiles is incompatible with non-local destination %s') %
868 867 d)
869 868
870 869 return orig(ui, source, dest, **opts)
871 870
872 871 def hgclone(orig, ui, opts, *args, **kwargs):
873 872 result = orig(ui, opts, *args, **kwargs)
874 873
875 874 if result is not None:
876 875 sourcerepo, destrepo = result
877 876 repo = destrepo.local()
878 877
879 878 # When cloning to a remote repo (like through SSH), no repo is available
880 879 # from the peer. Therefore the largefiles can't be downloaded and the
881 880 # hgrc can't be updated.
882 881 if not repo:
883 882 return result
884 883
885 884 # If largefiles is required for this repo, permanently enable it locally
886 885 if 'largefiles' in repo.requirements:
887 886 fp = repo.vfs('hgrc', 'a', text=True)
888 887 try:
889 888 fp.write('\n[extensions]\nlargefiles=\n')
890 889 finally:
891 890 fp.close()
892 891
893 892 # Caching is implicitly limited to the 'rev' option, since the dest repo was
894 893 # truncated at that point. The user may expect a download count with
895 894 # this option, so attempt it whether or not this is a largefile repo.
896 895 if opts.get('all_largefiles'):
897 896 success, missing = lfcommands.downloadlfiles(ui, repo, None)
898 897
899 898 if missing != 0:
900 899 return None
901 900
902 901 return result
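
# After the hgclone wrapper above runs against a largefiles repository, the
# destination's .hg/hgrc ends with the snippet written by the block above:
#
#   [extensions]
#   largefiles=
#
# so the extension is enabled locally for that clone even when it is not
# enabled in the user's global configuration.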
903 902
904 903 def overriderebase(orig, ui, repo, **opts):
905 904 if not util.safehasattr(repo, '_largefilesenabled'):
906 905 return orig(ui, repo, **opts)
907 906
908 907 resuming = opts.get('continue')
909 908 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
910 909 repo._lfstatuswriters.append(lambda *msg, **opts: None)
911 910 try:
912 911 return orig(ui, repo, **opts)
913 912 finally:
914 913 repo._lfstatuswriters.pop()
915 914 repo._lfcommithooks.pop()
916 915
917 916 def overridearchivecmd(orig, ui, repo, dest, **opts):
918 917 repo.unfiltered().lfstatus = True
919 918
920 919 try:
921 920 return orig(ui, repo.unfiltered(), dest, **opts)
922 921 finally:
923 922 repo.unfiltered().lfstatus = False
924 923
925 924 def hgwebarchive(orig, web, req, tmpl):
926 925 web.repo.lfstatus = True
927 926
928 927 try:
929 928 return orig(web, req, tmpl)
930 929 finally:
931 930 web.repo.lfstatus = False
932 931
933 932 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
934 933 prefix='', mtime=None, subrepos=None):
935 934 # For some reason setting repo.lfstatus in hgwebarchive only changes the
936 935 # unfiltered repo's attr, so check that as well.
937 936 if not repo.lfstatus and not repo.unfiltered().lfstatus:
938 937 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
939 938 subrepos)
940 939
941 940 # No need to lock because we are only reading history and
942 941 # largefile caches, neither of which are modified.
943 942 if node is not None:
944 943 lfcommands.cachelfiles(repo.ui, repo, node)
945 944
946 945 if kind not in archival.archivers:
947 946 raise error.Abort(_("unknown archive type '%s'") % kind)
948 947
949 948 ctx = repo[node]
950 949
951 950 if kind == 'files':
952 951 if prefix:
953 952 raise error.Abort(
954 953 _('cannot give prefix when archiving to files'))
955 954 else:
956 955 prefix = archival.tidyprefix(dest, kind, prefix)
957 956
958 957 def write(name, mode, islink, getdata):
959 958 if matchfn and not matchfn(name):
960 959 return
961 960 data = getdata()
962 961 if decode:
963 962 data = repo.wwritedata(name, data)
964 963 archiver.addfile(prefix + name, mode, islink, data)
965 964
966 965 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
967 966
968 967 if repo.ui.configbool("ui", "archivemeta", True):
969 968 write('.hg_archival.txt', 0o644, False,
970 969 lambda: archival.buildmetadata(ctx))
971 970
972 971 for f in ctx:
973 972 ff = ctx.flags(f)
974 973 getdata = ctx[f].data
975 974 if lfutil.isstandin(f):
976 975 if node is not None:
977 976 path = lfutil.findfile(repo, getdata().strip())
978 977
979 978 if path is None:
980 979 raise error.Abort(
981 980 _('largefile %s not found in repo store or system cache')
982 981 % lfutil.splitstandin(f))
983 982 else:
984 983 path = lfutil.splitstandin(f)
985 984
986 985 f = lfutil.splitstandin(f)
987 986
988 987 getdata = lambda: util.readfile(path)
989 988 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
990 989
991 990 if subrepos:
992 991 for subpath in sorted(ctx.substate):
993 992 sub = ctx.workingsub(subpath)
994 993 submatch = matchmod.subdirmatcher(subpath, matchfn)
995 994 sub._repo.lfstatus = True
996 995 sub.archive(archiver, prefix, submatch)
997 996
998 997 archiver.done()
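
# Illustrative usage of the archive override above (a sketch): with it in
# place, something like
#
#   hg archive -r tip ../snapshot.tar.gz
#
# stores the real largefile contents in the archive instead of the .hglf/
# standins, provided each largefile can be found in the repo store or the
# system cache (otherwise it aborts as shown above).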
999 998
1000 999 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
1001 1000 if not repo._repo.lfstatus:
1002 1001 return orig(repo, archiver, prefix, match)
1003 1002
1004 1003 repo._get(repo._state + ('hg',))
1005 1004 rev = repo._state[1]
1006 1005 ctx = repo._repo[rev]
1007 1006
1008 1007 if ctx.node() is not None:
1009 1008 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1010 1009
1011 1010 def write(name, mode, islink, getdata):
1012 1011 # At this point, the standin has been replaced with the largefile name,
1013 1012 # so the normal matcher works here without the lfutil variants.
1014 1013 if match and not match(name):
1015 1014 return
1016 1015 data = getdata()
1017 1016
1018 1017 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1019 1018
1020 1019 for f in ctx:
1021 1020 ff = ctx.flags(f)
1022 1021 getdata = ctx[f].data
1023 1022 if lfutil.isstandin(f):
1024 1023 if ctx.node() is not None:
1025 1024 path = lfutil.findfile(repo._repo, getdata().strip())
1026 1025
1027 1026 if path is None:
1028 1027 raise error.Abort(
1029 1028 _('largefile %s not found in repo store or system cache')
1030 1029 % lfutil.splitstandin(f))
1031 1030 else:
1032 1031 path = lfutil.splitstandin(f)
1033 1032
1034 1033 f = lfutil.splitstandin(f)
1035 1034
1036 1035 getdata = lambda: util.readfile(os.path.join(prefix, path))
1037 1036
1038 1037 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1039 1038
1040 1039 for subpath in sorted(ctx.substate):
1041 1040 sub = ctx.workingsub(subpath)
1042 1041 submatch = matchmod.subdirmatcher(subpath, match)
1043 1042 sub._repo.lfstatus = True
1044 1043 sub.archive(archiver, prefix + repo._path + '/', submatch)
1045 1044
1046 1045 # If a largefile is modified, the change is not reflected in its
1047 1046 # standin until a commit. cmdutil.bailifchanged() raises an exception
1048 1047 # if the repo has uncommitted changes. Wrap it to also check if
1049 1048 # largefiles were changed. This is used by bisect, backout and fetch.
1050 1049 def overridebailifchanged(orig, repo, *args, **kwargs):
1051 1050 orig(repo, *args, **kwargs)
1052 1051 repo.lfstatus = True
1053 1052 s = repo.status()
1054 1053 repo.lfstatus = False
1055 1054 if s.modified or s.added or s.removed or s.deleted:
1056 1055 raise error.Abort(_('uncommitted changes'))
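
# Illustration of the wrapper above (assumed behaviour, not a transcript):
# with a modified but uncommitted largefile in the working directory,
#
#   $ hg backout tip
#   abort: uncommitted changes
#
# even though the standin under .hglf/ is itself still clean.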
1057 1056
1058 1057 def postcommitstatus(orig, repo, *args, **kwargs):
1059 1058 repo.lfstatus = True
1060 1059 try:
1061 1060 return orig(repo, *args, **kwargs)
1062 1061 finally:
1063 1062 repo.lfstatus = False
1064 1063
1065 1064 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1066 1065 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1067 1066 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1068 1067 m = composelargefilematcher(match, repo[None].manifest())
1069 1068
1070 1069 try:
1071 1070 repo.lfstatus = True
1072 1071 s = repo.status(match=m, clean=True)
1073 1072 finally:
1074 1073 repo.lfstatus = False
1075 1074 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1076 1075 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1077 1076
1078 1077 for f in forget:
1079 1078 if lfutil.standin(f) not in repo.dirstate and not \
1080 1079 repo.wvfs.isdir(lfutil.standin(f)):
1081 1080 ui.warn(_('not removing %s: file is already untracked\n')
1082 1081 % m.rel(f))
1083 1082 bad.append(f)
1084 1083
1085 1084 for f in forget:
1086 1085 if ui.verbose or not m.exact(f):
1087 1086 ui.status(_('removing %s\n') % m.rel(f))
1088 1087
1089 1088 # Need to lock because standin files are deleted then removed from the
1090 1089 # repository and we could race in-between.
1091 1090 with repo.wlock():
1092 1091 lfdirstate = lfutil.openlfdirstate(ui, repo)
1093 1092 for f in forget:
1094 1093 if lfdirstate[f] == 'a':
1095 1094 lfdirstate.drop(f)
1096 1095 else:
1097 1096 lfdirstate.remove(f)
1098 1097 lfdirstate.write()
1099 1098 standins = [lfutil.standin(f) for f in forget]
1100 1099 for f in standins:
1101 1100 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1102 1101 rejected = repo[None].forget(standins)
1103 1102
1104 1103 bad.extend(f for f in rejected if f in m.files())
1105 1104 forgot.extend(f for f in forget if f not in rejected)
1106 1105 return bad, forgot
1107 1106
1108 1107 def _getoutgoings(repo, other, missing, addfunc):
1109 1108 """get pairs of filename and largefile hash in outgoing revisions
1110 1109 in 'missing'.
1111 1110
1112 1111 largefiles already existing on the 'other' repository are ignored.
1113 1112
1114 1113 'addfunc' is invoked with each unique pair of filename and
1115 1114 largefile hash value.
1116 1115 """
1117 1116 knowns = set()
1118 1117 lfhashes = set()
1119 1118 def dedup(fn, lfhash):
1120 1119 k = (fn, lfhash)
1121 1120 if k not in knowns:
1122 1121 knowns.add(k)
1123 1122 lfhashes.add(lfhash)
1124 1123 lfutil.getlfilestoupload(repo, missing, dedup)
1125 1124 if lfhashes:
1126 1125 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1127 1126 for fn, lfhash in knowns:
1128 1127 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1129 1128 addfunc(fn, lfhash)
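
# A minimal sketch of a caller of _getoutgoings (hypothetical names; the real
# callers are outgoinghook and summaryremotehook below):
#
#   toupload = {}
#   def addfunc(fn, lfhash):
#       toupload.setdefault(fn, set()).add(lfhash)
#   _getoutgoings(repo, other, outgoing.missing, addfunc)
#
# 'addfunc' only sees hashes that are missing from the 'other' repository.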
1130 1129
1131 1130 def outgoinghook(ui, repo, other, opts, missing):
1132 1131 if opts.pop('large', None):
1133 1132 lfhashes = set()
1134 1133 if ui.debugflag:
1135 1134 toupload = {}
1136 1135 def addfunc(fn, lfhash):
1137 1136 if fn not in toupload:
1138 1137 toupload[fn] = []
1139 1138 toupload[fn].append(lfhash)
1140 1139 lfhashes.add(lfhash)
1141 1140 def showhashes(fn):
1142 1141 for lfhash in sorted(toupload[fn]):
1143 1142 ui.debug(' %s\n' % (lfhash))
1144 1143 else:
1145 1144 toupload = set()
1146 1145 def addfunc(fn, lfhash):
1147 1146 toupload.add(fn)
1148 1147 lfhashes.add(lfhash)
1149 1148 def showhashes(fn):
1150 1149 pass
1151 1150 _getoutgoings(repo, other, missing, addfunc)
1152 1151
1153 1152 if not toupload:
1154 1153 ui.status(_('largefiles: no files to upload\n'))
1155 1154 else:
1156 1155 ui.status(_('largefiles to upload (%d entities):\n')
1157 1156 % (len(lfhashes)))
1158 1157 for file in sorted(toupload):
1159 1158 ui.status(lfutil.splitstandin(file) + '\n')
1160 1159 showhashes(file)
1161 1160 ui.status('\n')
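
# Example of the output produced above (hypothetical file names and counts):
#
#   $ hg outgoing --large
#   ...
#   largefiles to upload (2 entities):
#   data/big.bin
#   data/huge.iso
#
# With --debug, each file name is followed by the hashes queued for upload.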
1162 1161
1163 1162 def summaryremotehook(ui, repo, opts, changes):
1164 1163 largeopt = opts.get('large', False)
1165 1164 if changes is None:
1166 1165 if largeopt:
1167 1166 return (False, True) # only outgoing check is needed
1168 1167 else:
1169 1168 return (False, False)
1170 1169 elif largeopt:
1171 1170 url, branch, peer, outgoing = changes[1]
1172 1171 if peer is None:
1173 1172 # i18n: column positioning for "hg summary"
1174 1173 ui.status(_('largefiles: (no remote repo)\n'))
1175 1174 return
1176 1175
1177 1176 toupload = set()
1178 1177 lfhashes = set()
1179 1178 def addfunc(fn, lfhash):
1180 1179 toupload.add(fn)
1181 1180 lfhashes.add(lfhash)
1182 1181 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1183 1182
1184 1183 if not toupload:
1185 1184 # i18n: column positioning for "hg summary"
1186 1185 ui.status(_('largefiles: (no files to upload)\n'))
1187 1186 else:
1188 1187 # i18n: column positioning for "hg summary"
1189 1188 ui.status(_('largefiles: %d entities for %d files to upload\n')
1190 1189 % (len(lfhashes), len(toupload)))
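
# Example of the summary line produced above (hypothetical counts): two
# distinct largefile hashes spread over three file names would show up as
#
#   $ hg summary --large
#   ...
#   largefiles: 2 entities for 3 files to upload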
1191 1190
1192 1191 def overridesummary(orig, ui, repo, *pats, **opts):
1193 1192 try:
1194 1193 repo.lfstatus = True
1195 1194 orig(ui, repo, *pats, **opts)
1196 1195 finally:
1197 1196 repo.lfstatus = False
1198 1197
1199 1198 def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
1200 1199 similarity=None):
1201 1200 if opts is None:
1202 1201 opts = {}
1203 1202 if not lfutil.islfilesrepo(repo):
1204 1203 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1205 1204 # Get the list of missing largefiles so we can remove them
1206 1205 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1207 1206 unsure, s = lfdirstate.status(matchmod.always(repo.root, repo.getcwd()), [],
1208 1207 False, False, False)
1209 1208
1210 1209 # Call into the normal remove code, but leave removal of the standin to
1211 1210 # the original addremove. Monkey patching here makes sure
1212 1211 # we don't remove the standin in the largefiles code, preventing a very
1213 1212 # confused state later.
1214 1213 if s.deleted:
1215 1214 m = copy.copy(matcher)
1216 1215
1217 1216 # The m._files and m._map attributes are not changed to the deleted list
1218 1217 # because that affects the m.exact() test, which in turn governs whether
1219 1218 # or not the file name is printed, and how. Simply limit the original
1220 1219 # matches to those in the deleted status list.
1221 1220 matchfn = m.matchfn
1222 1221 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1223 1222
1224 1223 removelargefiles(repo.ui, repo, True, m, **opts)
1225 1224 # Call into the normal add code, and any files that *should* be added as
1226 1225 # largefiles will be
1227 1226 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1228 1227 # Now that we've handled largefiles, hand off to the original addremove
1229 1228 # function to take care of the rest. Make sure it doesn't do anything with
1230 1229 # largefiles by passing a matcher that will ignore them.
1231 1230 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1232 1231 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1233 1232
1234 1233 # Calling purge with --all will cause the largefiles to be deleted.
1235 1234 # Override repo.status to prevent this from happening.
1236 1235 def overridepurge(orig, ui, repo, *dirs, **opts):
1237 1236 # XXX Monkey patching a repoview will not work. The assigned attribute will
1238 1237 # be set on the unfiltered repo, but we will only look up attributes in the
1239 1238 # unfiltered repo if the lookup in the repoview object itself fails. As the
1240 1239 # monkey patched method exists on the repoview class the lookup will not
1241 1240 # fail. As a result, the original version will shadow the monkey patched
1242 1241 # one, defeating the monkey patch.
1243 1242 #
1244 1243 # As a workaround we use an unfiltered repo here. We should do something
1245 1244 # cleaner instead.
1246 1245 repo = repo.unfiltered()
1247 1246 oldstatus = repo.status
1248 1247 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1249 1248 clean=False, unknown=False, listsubrepos=False):
1250 1249 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1251 1250 listsubrepos)
1252 1251 lfdirstate = lfutil.openlfdirstate(ui, repo)
1253 1252 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1254 1253 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1255 1254 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1256 1255 unknown, ignored, r.clean)
1257 1256 repo.status = overridestatus
1258 1257 orig(ui, repo, *dirs, **opts)
1259 1258 repo.status = oldstatus
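
# Illustration of the purge override above (assumed behaviour): without it,
# 'hg purge --all' would treat checked-out largefiles as unknown or ignored
# files and delete them; with the status override, only files that the
# largefiles dirstate also reports as '?' remain candidates for removal.
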
1260 1259 def overriderollback(orig, ui, repo, **opts):
1261 1260 with repo.wlock():
1262 1261 before = repo.dirstate.parents()
1263 1262 orphans = set(f for f in repo.dirstate
1264 1263 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1265 1264 result = orig(ui, repo, **opts)
1266 1265 after = repo.dirstate.parents()
1267 1266 if before == after:
1268 1267 return result # no need to restore standins
1269 1268
1270 1269 pctx = repo['.']
1271 1270 for f in repo.dirstate:
1272 1271 if lfutil.isstandin(f):
1273 1272 orphans.discard(f)
1274 1273 if repo.dirstate[f] == 'r':
1275 1274 repo.wvfs.unlinkpath(f, ignoremissing=True)
1276 1275 elif f in pctx:
1277 1276 fctx = pctx[f]
1278 1277 repo.wwrite(f, fctx.data(), fctx.flags())
1279 1278 else:
1280 1279 # content of standin is not so important in 'a',
1281 1280 # 'm' or 'n' (coming from the 2nd parent) cases
1282 1281 lfutil.writestandin(repo, f, '', False)
1283 1282 for standin in orphans:
1284 1283 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1285 1284
1286 1285 lfdirstate = lfutil.openlfdirstate(ui, repo)
1287 1286 orphans = set(lfdirstate)
1288 1287 lfiles = lfutil.listlfiles(repo)
1289 1288 for file in lfiles:
1290 1289 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1291 1290 orphans.discard(file)
1292 1291 for lfile in orphans:
1293 1292 lfdirstate.drop(lfile)
1294 1293 lfdirstate.write()
1295 1294 return result
1296 1295
1297 1296 def overridetransplant(orig, ui, repo, *revs, **opts):
1298 1297 resuming = opts.get('continue')
1299 1298 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1300 1299 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1301 1300 try:
1302 1301 result = orig(ui, repo, *revs, **opts)
1303 1302 finally:
1304 1303 repo._lfstatuswriters.pop()
1305 1304 repo._lfcommithooks.pop()
1306 1305 return result
1307 1306
1308 1307 def overridecat(orig, ui, repo, file1, *pats, **opts):
1309 1308 ctx = scmutil.revsingle(repo, opts.get('rev'))
1310 1309 err = 1
1311 1310 notbad = set()
1312 1311 m = scmutil.match(ctx, (file1,) + pats, opts)
1313 1312 origmatchfn = m.matchfn
1314 1313 def lfmatchfn(f):
1315 1314 if origmatchfn(f):
1316 1315 return True
1317 1316 lf = lfutil.splitstandin(f)
1318 1317 if lf is None:
1319 1318 return False
1320 1319 notbad.add(lf)
1321 1320 return origmatchfn(lf)
1322 1321 m.matchfn = lfmatchfn
1323 1322 origbadfn = m.bad
1324 1323 def lfbadfn(f, msg):
1325 1324 if f not in notbad:
1326 1325 origbadfn(f, msg)
1327 1326 m.bad = lfbadfn
1328 1327
1329 1328 origvisitdirfn = m.visitdir
1330 1329 def lfvisitdirfn(dir):
1331 1330 if dir == lfutil.shortname:
1332 1331 return True
1333 1332 ret = origvisitdirfn(dir)
1334 1333 if ret:
1335 1334 return ret
1336 1335 lf = lfutil.splitstandin(dir)
1337 1336 if lf is None:
1338 1337 return False
1339 1338 return origvisitdirfn(lf)
1340 1339 m.visitdir = lfvisitdirfn
1341 1340
1342 1341 for f in ctx.walk(m):
1343 1342 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1344 1343 pathname=f)
1345 1344 lf = lfutil.splitstandin(f)
1346 1345 if lf is None or origmatchfn(f):
1347 1346 # duplicating unreachable code from commands.cat
1348 1347 data = ctx[f].data()
1349 1348 if opts.get('decode'):
1350 1349 data = repo.wwritedata(f, data)
1351 1350 fp.write(data)
1352 1351 else:
1353 1352 hash = lfutil.readstandin(repo, lf, ctx.rev())
1354 1353 if not lfutil.inusercache(repo.ui, hash):
1355 1354 store = storefactory.openstore(repo)
1356 1355 success, missing = store.get([(lf, hash)])
1357 1356 if len(success) != 1:
1358 1357 raise error.Abort(
1359 1358 _('largefile %s is not in cache and could not be '
1360 1359 'downloaded') % lf)
1361 1360 path = lfutil.usercachepath(repo.ui, hash)
1362 1361 fpin = open(path, "rb")
1363 1362 for chunk in util.filechunkiter(fpin, 128 * 1024):
1364 1363 fp.write(chunk)
1365 1364 fpin.close()
1366 1365 fp.close()
1367 1366 err = 0
1368 1367 return err
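
# Illustrative usage of the cat override above (a sketch, the file name is
# hypothetical): 'hg cat' on a largefile emits the real content rather than
# the standin's 40-character hash, fetching the blob into the user cache
# first if it is not already there, e.g.
#
#   hg cat -r tip data/big.bin -o big.bin.tip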
1369 1368
1370 1369 def mergeupdate(orig, repo, node, branchmerge, force,
1371 1370 *args, **kwargs):
1372 1371 matcher = kwargs.get('matcher', None)
1373 1372 # note if this is a partial update
1374 1373 partial = matcher and not matcher.always()
1375 1374 with repo.wlock():
1376 1375 # branch |       |         |
1377 1376 #  merge | force | partial | action
1378 1377 # -------+-------+---------+--------------
1379 1378 #    x   |   x   |    x    | linear-merge
1380 1379 #    o   |   x   |    x    | branch-merge
1381 1380 #    x   |   o   |    x    | overwrite (as clean update)
1382 1381 #    o   |   o   |    x    | force-branch-merge (*1)
1383 1382 #    x   |   x   |    o    | (*)
1384 1383 #    o   |   x   |    o    | (*)
1385 1384 #    x   |   o   |    o    | overwrite (as revert)
1386 1385 #    o   |   o   |    o    | (*)
1387 1386 #
1388 1387 # (*) don't care
1389 1388 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1390 1389
1391 1390 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1392 1391 unsure, s = lfdirstate.status(matchmod.always(repo.root,
1393 1392 repo.getcwd()),
1394 1393 [], False, False, False)
1395 1394 pctx = repo['.']
1396 1395 for lfile in unsure + s.modified:
1397 1396 lfileabs = repo.wvfs.join(lfile)
1398 1397 if not repo.wvfs.exists(lfileabs):
1399 1398 continue
1400 1399 lfhash = lfutil.hashrepofile(repo, lfile)
1401 1400 standin = lfutil.standin(lfile)
1402 1401 lfutil.writestandin(repo, standin, lfhash,
1403 1402 lfutil.getexecutable(lfileabs))
1404 1403 if (standin in pctx and
1405 1404 lfhash == lfutil.readstandin(repo, lfile, '.')):
1406 1405 lfdirstate.normal(lfile)
1407 1406 for lfile in s.added:
1408 1407 lfutil.updatestandin(repo, lfutil.standin(lfile))
1409 1408 lfdirstate.write()
1410 1409
1411 1410 oldstandins = lfutil.getstandinsstate(repo)
1412 1411
1413 1412 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1414 1413
1415 1414 newstandins = lfutil.getstandinsstate(repo)
1416 1415 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1417 1416 if branchmerge or force or partial:
1418 1417 filelist.extend(s.deleted + s.removed)
1419 1418
1420 1419 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1421 1420 normallookup=partial)
1422 1421
1423 1422 return result
1424 1423
1425 1424 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1426 1425 result = orig(repo, files, *args, **kwargs)
1427 1426
1428 1427 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1429 1428 if filelist:
1430 1429 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1431 1430 printmessage=False, normallookup=True)
1432 1431
1433 1432 return result