largefiles: move "copyalltostore" invocation into "markcommitted"...
FUJIWARA Katsunori
r23276:4be75483 default
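
The diff below moves the lfutil.copyalltostore() call out of the conversion-specific path (which guarded it with a repo._isconverting check) and into two places: markcommitted() in lfutil.py, so that every ordinary commit copies its largefiles into the local store, and _commitcontext() in lfcommands.py for the lfconvert case. For orientation, here is a minimal sketch of the markcommitted() flow as it looks after this change; it is condensed from the lfutil.py hunk further down, and openlfdirstate, synclfdirstate, isstandin, splitstandin and copyalltostore are the helpers defined in that file.

    # Sketch only, condensed from lfutil.markcommitted() after this change.
    # "orig" is the wrapped context.workingctx.markcommitted implementation.
    def markcommitted(orig, ctx, node):
        repo = ctx._repo

        orig(node)

        # Sync the largefiles dirstate for every standin touched by the commit.
        lfdirstate = openlfdirstate(repo.ui, repo)
        for f in ctx.files():
            if isstandin(f):
                synclfdirstate(repo, lfdirstate, splitstandin(f), False)
        lfdirstate.write()

        # New in this revision: copy the committed largefiles into the store
        # here, so the copy happens on every commit path, not only in lfconvert.
        copyalltostore(repo, node)
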
@@ -1,578 +1,579 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10
11 11 import os, errno
12 12 import shutil
13 13
14 14 from mercurial import util, match as match_, hg, node, context, error, \
15 15 cmdutil, scmutil, commands
16 16 from mercurial.i18n import _
17 17 from mercurial.lock import release
18 18
19 19 import lfutil
20 20 import basestore
21 21
22 22 # -- Commands ----------------------------------------------------------
23 23
24 24 cmdtable = {}
25 25 command = cmdutil.command(cmdtable)
26 26
27 27 @command('lfconvert',
28 28 [('s', 'size', '',
29 29 _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
30 30 ('', 'to-normal', False,
31 31 _('convert from a largefiles repo to a normal repo')),
32 32 ],
33 33 _('hg lfconvert SOURCE DEST [FILE ...]'),
34 34 norepo=True,
35 35 inferrepo=True)
36 36 def lfconvert(ui, src, dest, *pats, **opts):
37 37 '''convert a normal repository to a largefiles repository
38 38
39 39 Convert repository SOURCE to a new repository DEST, identical to
40 40 SOURCE except that certain files will be converted as largefiles:
41 41 specifically, any file that matches any PATTERN *or* whose size is
42 42 above the minimum size threshold is converted as a largefile. The
43 43 size used to determine whether or not to track a file as a
44 44 largefile is the size of the first version of the file. The
45 45 minimum size can be specified either with --size or in
46 46 configuration as ``largefiles.size``.
47 47
48 48 After running this command you will need to make sure that
49 49 largefiles is enabled anywhere you intend to push the new
50 50 repository.
51 51
52 52 Use --to-normal to convert largefiles back to normal files; after
53 53 this, the DEST repository can be used without largefiles at all.'''
54 54
55 55 if opts['to_normal']:
56 56 tolfile = False
57 57 else:
58 58 tolfile = True
59 59 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
60 60
61 61 if not hg.islocal(src):
62 62 raise util.Abort(_('%s is not a local Mercurial repo') % src)
63 63 if not hg.islocal(dest):
64 64 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
65 65
66 66 rsrc = hg.repository(ui, src)
67 67 ui.status(_('initializing destination %s\n') % dest)
68 68 rdst = hg.repository(ui, dest, create=True)
69 69
70 70 success = False
71 71 dstwlock = dstlock = None
72 72 try:
73 73 # Lock destination to prevent modification while it is converted to.
74 74 # Don't need to lock src because we are just reading from its history
75 75 # which can't change.
76 76 dstwlock = rdst.wlock()
77 77 dstlock = rdst.lock()
78 78
79 79 # Get a list of all changesets in the source. The easy way to do this
80 80 # is to simply walk the changelog, using changelog.nodesbetween().
81 81 # Take a look at mercurial/revlog.py:639 for more details.
82 82 # Use a generator instead of a list to decrease memory usage
83 83 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
84 84 rsrc.heads())[0])
85 85 revmap = {node.nullid: node.nullid}
86 86 if tolfile:
87 87 lfiles = set()
88 88 normalfiles = set()
89 89 if not pats:
90 90 pats = ui.configlist(lfutil.longname, 'patterns', default=[])
91 91 if pats:
92 92 matcher = match_.match(rsrc.root, '', list(pats))
93 93 else:
94 94 matcher = None
95 95
96 96 lfiletohash = {}
97 97 for ctx in ctxs:
98 98 ui.progress(_('converting revisions'), ctx.rev(),
99 99 unit=_('revision'), total=rsrc['tip'].rev())
100 100 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
101 101 lfiles, normalfiles, matcher, size, lfiletohash)
102 102 ui.progress(_('converting revisions'), None)
103 103
104 104 if os.path.exists(rdst.wjoin(lfutil.shortname)):
105 105 shutil.rmtree(rdst.wjoin(lfutil.shortname))
106 106
107 107 for f in lfiletohash.keys():
108 108 if os.path.isfile(rdst.wjoin(f)):
109 109 os.unlink(rdst.wjoin(f))
110 110 try:
111 111 os.removedirs(os.path.dirname(rdst.wjoin(f)))
112 112 except OSError:
113 113 pass
114 114
115 115 # If there were any files converted to largefiles, add largefiles
116 116 # to the destination repository's requirements.
117 117 if lfiles:
118 118 rdst.requirements.add('largefiles')
119 119 rdst._writerequirements()
120 120 else:
121 121 for ctx in ctxs:
122 122 ui.progress(_('converting revisions'), ctx.rev(),
123 123 unit=_('revision'), total=rsrc['tip'].rev())
124 124 _addchangeset(ui, rsrc, rdst, ctx, revmap)
125 125
126 126 ui.progress(_('converting revisions'), None)
127 127 success = True
128 128 finally:
129 129 rdst.dirstate.clear()
130 130 release(dstlock, dstwlock)
131 131 if not success:
132 132 # we failed, remove the new directory
133 133 shutil.rmtree(rdst.root)
134 134
135 135 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
136 136 # Convert src parents to dst parents
137 137 parents = _convertparents(ctx, revmap)
138 138
139 139 # Generate list of changed files
140 140 files = _getchangedfiles(ctx, parents)
141 141
142 142 def getfilectx(repo, memctx, f):
143 143 if lfutil.standin(f) in files:
144 144 # if the file isn't in the manifest then it was removed
145 145 # or renamed, raise IOError to indicate this
146 146 try:
147 147 fctx = ctx.filectx(lfutil.standin(f))
148 148 except error.LookupError:
149 149 return None
150 150 renamed = fctx.renamed()
151 151 if renamed:
152 152 renamed = lfutil.splitstandin(renamed[0])
153 153
154 154 hash = fctx.data().strip()
155 155 path = lfutil.findfile(rsrc, hash)
156 156
157 157 # If one file is missing, likely all files from this rev are
158 158 if path is None:
159 159 cachelfiles(ui, rsrc, ctx.node())
160 160 path = lfutil.findfile(rsrc, hash)
161 161
162 162 if path is None:
163 163 raise util.Abort(
164 164 _("missing largefile \'%s\' from revision %s")
165 165 % (f, node.hex(ctx.node())))
166 166
167 167 data = ''
168 168 fd = None
169 169 try:
170 170 fd = open(path, 'rb')
171 171 data = fd.read()
172 172 finally:
173 173 if fd:
174 174 fd.close()
175 175 return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
176 176 'x' in fctx.flags(), renamed)
177 177 else:
178 178 return _getnormalcontext(repo, ctx, f, revmap)
179 179
180 180 dstfiles = []
181 181 for file in files:
182 182 if lfutil.isstandin(file):
183 183 dstfiles.append(lfutil.splitstandin(file))
184 184 else:
185 185 dstfiles.append(file)
186 186 # Commit
187 187 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
188 188
189 189 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
190 190 matcher, size, lfiletohash):
191 191 # Convert src parents to dst parents
192 192 parents = _convertparents(ctx, revmap)
193 193
194 194 # Generate list of changed files
195 195 files = _getchangedfiles(ctx, parents)
196 196
197 197 dstfiles = []
198 198 for f in files:
199 199 if f not in lfiles and f not in normalfiles:
200 200 islfile = _islfile(f, ctx, matcher, size)
201 201 # If this file was renamed or copied then copy
202 202 # the largefile-ness of its predecessor
203 203 if f in ctx.manifest():
204 204 fctx = ctx.filectx(f)
205 205 renamed = fctx.renamed()
206 206 renamedlfile = renamed and renamed[0] in lfiles
207 207 islfile |= renamedlfile
208 208 if 'l' in fctx.flags():
209 209 if renamedlfile:
210 210 raise util.Abort(
211 211 _('renamed/copied largefile %s becomes symlink')
212 212 % f)
213 213 islfile = False
214 214 if islfile:
215 215 lfiles.add(f)
216 216 else:
217 217 normalfiles.add(f)
218 218
219 219 if f in lfiles:
220 220 dstfiles.append(lfutil.standin(f))
221 221 # largefile in manifest if it has not been removed/renamed
222 222 if f in ctx.manifest():
223 223 fctx = ctx.filectx(f)
224 224 if 'l' in fctx.flags():
225 225 renamed = fctx.renamed()
226 226 if renamed and renamed[0] in lfiles:
227 227 raise util.Abort(_('largefile %s becomes symlink') % f)
228 228
229 229 # largefile was modified, update standins
230 230 m = util.sha1('')
231 231 m.update(ctx[f].data())
232 232 hash = m.hexdigest()
233 233 if f not in lfiletohash or lfiletohash[f] != hash:
234 234 rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
235 235 executable = 'x' in ctx[f].flags()
236 236 lfutil.writestandin(rdst, lfutil.standin(f), hash,
237 237 executable)
238 238 lfiletohash[f] = hash
239 239 else:
240 240 # normal file
241 241 dstfiles.append(f)
242 242
243 243 def getfilectx(repo, memctx, f):
244 244 if lfutil.isstandin(f):
245 245 # if the file isn't in the manifest then it was removed
246 246 # or renamed, raise IOError to indicate this
247 247 srcfname = lfutil.splitstandin(f)
248 248 try:
249 249 fctx = ctx.filectx(srcfname)
250 250 except error.LookupError:
251 251 return None
252 252 renamed = fctx.renamed()
253 253 if renamed:
254 254 # standin is always a largefile because largefile-ness
255 255 # doesn't change after rename or copy
256 256 renamed = lfutil.standin(renamed[0])
257 257
258 258 return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
259 259 'l' in fctx.flags(), 'x' in fctx.flags(),
260 260 renamed)
261 261 else:
262 262 return _getnormalcontext(repo, ctx, f, revmap)
263 263
264 264 # Commit
265 265 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
266 266
267 267 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
268 268 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
269 269 getfilectx, ctx.user(), ctx.date(), ctx.extra())
270 270 ret = rdst.commitctx(mctx)
271 lfutil.copyalltostore(rdst, ret)
271 272 rdst.setparents(ret)
272 273 revmap[ctx.node()] = rdst.changelog.tip()
273 274
274 275 # Generate list of changed files
275 276 def _getchangedfiles(ctx, parents):
276 277 files = set(ctx.files())
277 278 if node.nullid not in parents:
278 279 mc = ctx.manifest()
279 280 mp1 = ctx.parents()[0].manifest()
280 281 mp2 = ctx.parents()[1].manifest()
281 282 files |= (set(mp1) | set(mp2)) - set(mc)
282 283 for f in mc:
283 284 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
284 285 files.add(f)
285 286 return files
286 287
287 288 # Convert src parents to dst parents
288 289 def _convertparents(ctx, revmap):
289 290 parents = []
290 291 for p in ctx.parents():
291 292 parents.append(revmap[p.node()])
292 293 while len(parents) < 2:
293 294 parents.append(node.nullid)
294 295 return parents
295 296
296 297 # Get memfilectx for a normal file
297 298 def _getnormalcontext(repo, ctx, f, revmap):
298 299 try:
299 300 fctx = ctx.filectx(f)
300 301 except error.LookupError:
301 302 return None
302 303 renamed = fctx.renamed()
303 304 if renamed:
304 305 renamed = renamed[0]
305 306
306 307 data = fctx.data()
307 308 if f == '.hgtags':
308 309 data = _converttags (repo.ui, revmap, data)
309 310 return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
310 311 'x' in fctx.flags(), renamed)
311 312
312 313 # Remap tag data using a revision map
313 314 def _converttags(ui, revmap, data):
314 315 newdata = []
315 316 for line in data.splitlines():
316 317 try:
317 318 id, name = line.split(' ', 1)
318 319 except ValueError:
319 320 ui.warn(_('skipping incorrectly formatted tag %s\n')
320 321 % line)
321 322 continue
322 323 try:
323 324 newid = node.bin(id)
324 325 except TypeError:
325 326 ui.warn(_('skipping incorrectly formatted id %s\n')
326 327 % id)
327 328 continue
328 329 try:
329 330 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
330 331 name))
331 332 except KeyError:
332 333 ui.warn(_('no mapping for id %s\n') % id)
333 334 continue
334 335 return ''.join(newdata)
335 336
336 337 def _islfile(file, ctx, matcher, size):
337 338 '''Return true if file should be considered a largefile, i.e.
338 339 matcher matches it or it is larger than size.'''
339 340 # never store special .hg* files as largefiles
340 341 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
341 342 return False
342 343 if matcher and matcher(file):
343 344 return True
344 345 try:
345 346 return ctx.filectx(file).size() >= size * 1024 * 1024
346 347 except error.LookupError:
347 348 return False
348 349
349 350 def uploadlfiles(ui, rsrc, rdst, files):
350 351 '''upload largefiles to the central store'''
351 352
352 353 if not files:
353 354 return
354 355
355 356 store = basestore._openstore(rsrc, rdst, put=True)
356 357
357 358 at = 0
358 359 ui.debug("sending statlfile command for %d largefiles\n" % len(files))
359 360 retval = store.exists(files)
360 361 files = filter(lambda h: not retval[h], files)
361 362 ui.debug("%d largefiles need to be uploaded\n" % len(files))
362 363
363 364 for hash in files:
364 365 ui.progress(_('uploading largefiles'), at, unit='largefile',
365 366 total=len(files))
366 367 source = lfutil.findfile(rsrc, hash)
367 368 if not source:
368 369 raise util.Abort(_('largefile %s missing from store'
369 370 ' (needs to be uploaded)') % hash)
370 371 # XXX check for errors here
371 372 store.put(source, hash)
372 373 at += 1
373 374 ui.progress(_('uploading largefiles'), None)
374 375
375 376 def verifylfiles(ui, repo, all=False, contents=False):
376 377 '''Verify that every largefile revision in the current changeset
377 378 exists in the central store. With --contents, also verify that
378 379 the contents of each local largefile file revision are correct (SHA-1 hash
379 380 matches the revision ID). With --all, check every changeset in
380 381 this repository.'''
381 382 if all:
382 383 # Pass a list to the function rather than an iterator because we know a
383 384 # list will work.
384 385 revs = range(len(repo))
385 386 else:
386 387 revs = ['.']
387 388
388 389 store = basestore._openstore(repo)
389 390 return store.verify(revs, contents=contents)
390 391
391 392 def cachelfiles(ui, repo, node, filelist=None):
392 393 '''cachelfiles ensures that all largefiles needed by the specified revision
393 394 are present in the repository's largefile cache.
394 395
395 396 returns a tuple (cached, missing). cached is the list of files downloaded
396 397 by this operation; missing is the list of files that were needed but could
397 398 not be found.'''
398 399 lfiles = lfutil.listlfiles(repo, node)
399 400 if filelist:
400 401 lfiles = set(lfiles) & set(filelist)
401 402 toget = []
402 403
403 404 for lfile in lfiles:
404 405 try:
405 406 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
406 407 except IOError, err:
407 408 if err.errno == errno.ENOENT:
408 409 continue # node must be None and standin wasn't found in wctx
409 410 raise
410 411 if not lfutil.findfile(repo, expectedhash):
411 412 toget.append((lfile, expectedhash))
412 413
413 414 if toget:
414 415 store = basestore._openstore(repo)
415 416 ret = store.get(toget)
416 417 return ret
417 418
418 419 return ([], [])
419 420
420 421 def downloadlfiles(ui, repo, rev=None):
421 422 matchfn = scmutil.match(repo[None],
422 423 [repo.wjoin(lfutil.shortname)], {})
423 424 def prepare(ctx, fns):
424 425 pass
425 426 totalsuccess = 0
426 427 totalmissing = 0
427 428 if rev != []: # walkchangerevs on empty list would return all revs
428 429 for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
429 430 prepare):
430 431 success, missing = cachelfiles(ui, repo, ctx.node())
431 432 totalsuccess += len(success)
432 433 totalmissing += len(missing)
433 434 ui.status(_("%d additional largefiles cached\n") % totalsuccess)
434 435 if totalmissing > 0:
435 436 ui.status(_("%d largefiles failed to download\n") % totalmissing)
436 437 return totalsuccess, totalmissing
437 438
438 439 def updatelfiles(ui, repo, filelist=None, printmessage=None,
439 440 normallookup=False):
440 441 '''Update largefiles according to standins in the working directory
441 442
442 443 If ``printmessage`` is other than ``None``, it means "print (or
443 444 ignore, for false) message forcibly".
444 445 '''
445 446 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
446 447 wlock = repo.wlock()
447 448 try:
448 449 lfdirstate = lfutil.openlfdirstate(ui, repo)
449 450 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
450 451
451 452 if filelist is not None:
452 453 filelist = set(filelist)
453 454 lfiles = [f for f in lfiles if f in filelist]
454 455
455 456 update = {}
456 457 updated, removed = 0, 0
457 458 for lfile in lfiles:
458 459 abslfile = repo.wjoin(lfile)
459 460 absstandin = repo.wjoin(lfutil.standin(lfile))
460 461 if os.path.exists(absstandin):
461 462 if (os.path.exists(absstandin + '.orig') and
462 463 os.path.exists(abslfile)):
463 464 shutil.copyfile(abslfile, abslfile + '.orig')
464 465 util.unlinkpath(absstandin + '.orig')
465 466 expecthash = lfutil.readstandin(repo, lfile)
466 467 if (expecthash != '' and
467 468 (not os.path.exists(abslfile) or
468 469 expecthash != lfutil.hashfile(abslfile))):
469 470 if lfile not in repo[None]: # not switched to normal file
470 471 util.unlinkpath(abslfile, ignoremissing=True)
471 472 # use normallookup() to allocate an entry in largefiles
472 473 # dirstate, because lack of it misleads
473 474 # lfilesrepo.status() into recognition that such cache
474 475 # missing files are removed.
475 476 lfdirstate.normallookup(lfile)
476 477 update[lfile] = expecthash
477 478 else:
478 479 # Remove lfiles for which the standin is deleted, unless the
479 480 # lfile is added to the repository again. This happens when a
480 481 # largefile is converted back to a normal file: the standin
481 482 # disappears, but a new (normal) file appears as the lfile.
482 483 if (os.path.exists(abslfile) and
483 484 repo.dirstate.normalize(lfile) not in repo[None]):
484 485 util.unlinkpath(abslfile)
485 486 removed += 1
486 487
487 488 # largefile processing might be slow and be interrupted - be prepared
488 489 lfdirstate.write()
489 490
490 491 if lfiles:
491 492 statuswriter(_('getting changed largefiles\n'))
492 493 cachelfiles(ui, repo, None, lfiles)
493 494
494 495 for lfile in lfiles:
495 496 update1 = 0
496 497
497 498 expecthash = update.get(lfile)
498 499 if expecthash:
499 500 if not lfutil.copyfromcache(repo, expecthash, lfile):
500 501 # failed ... but already removed and set to normallookup
501 502 continue
502 503 # Synchronize largefile dirstate to the last modified
503 504 # time of the file
504 505 lfdirstate.normal(lfile)
505 506 update1 = 1
506 507
507 508 # copy the state of largefile standin from the repository's
508 509 # dirstate to its state in the lfdirstate.
509 510 abslfile = repo.wjoin(lfile)
510 511 absstandin = repo.wjoin(lfutil.standin(lfile))
511 512 if os.path.exists(absstandin):
512 513 mode = os.stat(absstandin).st_mode
513 514 if mode != os.stat(abslfile).st_mode:
514 515 os.chmod(abslfile, mode)
515 516 update1 = 1
516 517
517 518 updated += update1
518 519
519 520 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
520 521
521 522 if filelist is not None:
522 523 # If "local largefile" is chosen at file merging, it is
523 524 # not listed in "filelist" (= dirstate syncing is
524 525 # omitted), because the standin file is not changed before and
525 526 # after merging.
526 527 # But the status of such files may have to be changed by
527 528 # merging. For example, locally modified ("M") largefile
528 529 # has to become re-added("A"), if it is "normal" file in
529 530 # the target revision of linear-merging.
530 531 for lfile in lfdirstate:
531 532 if lfile not in filelist:
532 533 lfutil.synclfdirstate(repo, lfdirstate, lfile, True)
533 534
534 535 lfdirstate.write()
535 536 if lfiles:
536 537 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
537 538 removed))
538 539 finally:
539 540 wlock.release()
540 541
541 542 @command('lfpull',
542 543 [('r', 'rev', [], _('pull largefiles for these revisions'))
543 544 ] + commands.remoteopts,
544 545 _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
545 546 def lfpull(ui, repo, source="default", **opts):
546 547 """pull largefiles for the specified revisions from the specified source
547 548
548 549 Pull largefiles that are referenced from local changesets but missing
549 550 locally, pulling from a remote repository to the local cache.
550 551
551 552 If SOURCE is omitted, the 'default' path will be used.
552 553 See :hg:`help urls` for more information.
553 554
554 555 .. container:: verbose
555 556
556 557 Some examples:
557 558
558 559 - pull largefiles for all branch heads::
559 560
560 561 hg lfpull -r "head() and not closed()"
561 562
562 563 - pull largefiles on the default branch::
563 564
564 565 hg lfpull -r "branch(default)"
565 566 """
566 567 repo.lfpullsource = source
567 568
568 569 revs = opts.get('rev', [])
569 570 if not revs:
570 571 raise util.Abort(_('no revisions specified'))
571 572 revs = scmutil.revrange(repo, revs)
572 573
573 574 numcached = 0
574 575 for rev in revs:
575 576 ui.note(_('pulling largefiles for revision %s\n') % rev)
576 577 (cached, missing) = cachelfiles(ui, repo, rev)
577 578 numcached += len(cached)
578 579 ui.status(_("%d largefiles cached\n") % numcached)
@@ -1,573 +1,576 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import platform
13 13 import shutil
14 14 import stat
15 15 import copy
16 16
17 17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 18 from mercurial.i18n import _
19 19 from mercurial import node
20 20
21 21 shortname = '.hglf'
22 22 shortnameslash = shortname + '/'
23 23 longname = 'largefiles'
24 24
25 25
26 26 # -- Private worker functions ------------------------------------------
27 27
28 28 def getminsize(ui, assumelfiles, opt, default=10):
29 29 lfsize = opt
30 30 if not lfsize and assumelfiles:
31 31 lfsize = ui.config(longname, 'minsize', default=default)
32 32 if lfsize:
33 33 try:
34 34 lfsize = float(lfsize)
35 35 except ValueError:
36 36 raise util.Abort(_('largefiles: size must be number (not %s)\n')
37 37 % lfsize)
38 38 if lfsize is None:
39 39 raise util.Abort(_('minimum size for largefiles must be specified'))
40 40 return lfsize
41 41
42 42 def link(src, dest):
43 43 util.makedirs(os.path.dirname(dest))
44 44 try:
45 45 util.oslink(src, dest)
46 46 except OSError:
47 47 # if hardlinks fail, fallback on atomic copy
48 48 dst = util.atomictempfile(dest)
49 49 for chunk in util.filechunkiter(open(src, 'rb')):
50 50 dst.write(chunk)
51 51 dst.close()
52 52 os.chmod(dest, os.stat(src).st_mode)
53 53
54 54 def usercachepath(ui, hash):
55 55 path = ui.configpath(longname, 'usercache', None)
56 56 if path:
57 57 path = os.path.join(path, hash)
58 58 else:
59 59 if os.name == 'nt':
60 60 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
61 61 if appdata:
62 62 path = os.path.join(appdata, longname, hash)
63 63 elif platform.system() == 'Darwin':
64 64 home = os.getenv('HOME')
65 65 if home:
66 66 path = os.path.join(home, 'Library', 'Caches',
67 67 longname, hash)
68 68 elif os.name == 'posix':
69 69 path = os.getenv('XDG_CACHE_HOME')
70 70 if path:
71 71 path = os.path.join(path, longname, hash)
72 72 else:
73 73 home = os.getenv('HOME')
74 74 if home:
75 75 path = os.path.join(home, '.cache', longname, hash)
76 76 else:
77 77 raise util.Abort(_('unknown operating system: %s\n') % os.name)
78 78 return path
79 79
80 80 def inusercache(ui, hash):
81 81 path = usercachepath(ui, hash)
82 82 return path and os.path.exists(path)
83 83
84 84 def findfile(repo, hash):
85 85 if instore(repo, hash):
86 86 repo.ui.note(_('found %s in store\n') % hash)
87 87 return storepath(repo, hash)
88 88 elif inusercache(repo.ui, hash):
89 89 repo.ui.note(_('found %s in system cache\n') % hash)
90 90 path = storepath(repo, hash)
91 91 link(usercachepath(repo.ui, hash), path)
92 92 return path
93 93 return None
94 94
95 95 class largefilesdirstate(dirstate.dirstate):
96 96 def __getitem__(self, key):
97 97 return super(largefilesdirstate, self).__getitem__(unixpath(key))
98 98 def normal(self, f):
99 99 return super(largefilesdirstate, self).normal(unixpath(f))
100 100 def remove(self, f):
101 101 return super(largefilesdirstate, self).remove(unixpath(f))
102 102 def add(self, f):
103 103 return super(largefilesdirstate, self).add(unixpath(f))
104 104 def drop(self, f):
105 105 return super(largefilesdirstate, self).drop(unixpath(f))
106 106 def forget(self, f):
107 107 return super(largefilesdirstate, self).forget(unixpath(f))
108 108 def normallookup(self, f):
109 109 return super(largefilesdirstate, self).normallookup(unixpath(f))
110 110 def _ignore(self, f):
111 111 return False
112 112
113 113 def openlfdirstate(ui, repo, create=True):
114 114 '''
115 115 Return a dirstate object that tracks largefiles: i.e. its root is
116 116 the repo root, but it is saved in .hg/largefiles/dirstate.
117 117 '''
118 118 lfstoredir = repo.join(longname)
119 119 opener = scmutil.opener(lfstoredir)
120 120 lfdirstate = largefilesdirstate(opener, ui, repo.root,
121 121 repo.dirstate._validate)
122 122
123 123 # If the largefiles dirstate does not exist, populate and create
124 124 # it. This ensures that we create it on the first meaningful
125 125 # largefiles operation in a new clone.
126 126 if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
127 127 matcher = getstandinmatcher(repo)
128 128 standins = repo.dirstate.walk(matcher, [], False, False)
129 129
130 130 if len(standins) > 0:
131 131 util.makedirs(lfstoredir)
132 132
133 133 for standin in standins:
134 134 lfile = splitstandin(standin)
135 135 lfdirstate.normallookup(lfile)
136 136 return lfdirstate
137 137
138 138 def lfdirstatestatus(lfdirstate, repo):
139 139 wctx = repo['.']
140 140 match = match_.always(repo.root, repo.getcwd())
141 141 unsure, s = lfdirstate.status(match, [], False, False, False)
142 142 modified, clean = s.modified, s.clean
143 143 for lfile in unsure:
144 144 try:
145 145 fctx = wctx[standin(lfile)]
146 146 except LookupError:
147 147 fctx = None
148 148 if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
149 149 modified.append(lfile)
150 150 else:
151 151 clean.append(lfile)
152 152 lfdirstate.normal(lfile)
153 153 return s
154 154
155 155 def listlfiles(repo, rev=None, matcher=None):
156 156 '''return a list of largefiles in the working copy or the
157 157 specified changeset'''
158 158
159 159 if matcher is None:
160 160 matcher = getstandinmatcher(repo)
161 161
162 162 # ignore unknown files in working directory
163 163 return [splitstandin(f)
164 164 for f in repo[rev].walk(matcher)
165 165 if rev is not None or repo.dirstate[f] != '?']
166 166
167 167 def instore(repo, hash):
168 168 return os.path.exists(storepath(repo, hash))
169 169
170 170 def storepath(repo, hash):
171 171 return repo.join(os.path.join(longname, hash))
172 172
173 173 def copyfromcache(repo, hash, filename):
174 174 '''Copy the specified largefile from the repo or system cache to
175 175 filename in the repository. Return true on success or false if the
176 176 file was not found in either cache (which should not happen:
177 177 this is meant to be called only after ensuring that the needed
178 178 largefile exists in the cache).'''
179 179 path = findfile(repo, hash)
180 180 if path is None:
181 181 return False
182 182 util.makedirs(os.path.dirname(repo.wjoin(filename)))
183 183 # The write may fail before the file is fully written, but we
184 184 # don't use atomic writes in the working copy.
185 185 shutil.copy(path, repo.wjoin(filename))
186 186 return True
187 187
188 188 def copytostore(repo, rev, file, uploaded=False):
189 189 hash = readstandin(repo, file, rev)
190 190 if instore(repo, hash):
191 191 return
192 192 copytostoreabsolute(repo, repo.wjoin(file), hash)
193 193
194 194 def copyalltostore(repo, node):
195 195 '''Copy all largefiles in a given revision to the store'''
196 196
197 197 ctx = repo[node]
198 198 for filename in ctx.files():
199 199 if isstandin(filename) and filename in ctx.manifest():
200 200 realfile = splitstandin(filename)
201 201 copytostore(repo, ctx.node(), realfile)
202 202
203 203
204 204 def copytostoreabsolute(repo, file, hash):
205 205 if inusercache(repo.ui, hash):
206 206 link(usercachepath(repo.ui, hash), storepath(repo, hash))
207 elif not getattr(repo, "_isconverting", False):
207 else:
208 208 util.makedirs(os.path.dirname(storepath(repo, hash)))
209 209 dst = util.atomictempfile(storepath(repo, hash),
210 210 createmode=repo.store.createmode)
211 211 for chunk in util.filechunkiter(open(file, 'rb')):
212 212 dst.write(chunk)
213 213 dst.close()
214 214 linktousercache(repo, hash)
215 215
216 216 def linktousercache(repo, hash):
217 217 path = usercachepath(repo.ui, hash)
218 218 if path:
219 219 link(storepath(repo, hash), path)
220 220
221 221 def getstandinmatcher(repo, pats=[], opts={}):
222 222 '''Return a match object that applies pats to the standin directory'''
223 223 standindir = repo.wjoin(shortname)
224 224 if pats:
225 225 pats = [os.path.join(standindir, pat) for pat in pats]
226 226 else:
227 227 # no patterns: relative to repo root
228 228 pats = [standindir]
229 229 # no warnings about missing files or directories
230 230 match = scmutil.match(repo[None], pats, opts)
231 231 match.bad = lambda f, msg: None
232 232 return match
233 233
234 234 def composestandinmatcher(repo, rmatcher):
235 235 '''Return a matcher that accepts standins corresponding to the
236 236 files accepted by rmatcher. Pass the list of files in the matcher
237 237 as the paths specified by the user.'''
238 238 smatcher = getstandinmatcher(repo, rmatcher.files())
239 239 isstandin = smatcher.matchfn
240 240 def composedmatchfn(f):
241 241 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
242 242 smatcher.matchfn = composedmatchfn
243 243
244 244 return smatcher
245 245
246 246 def standin(filename):
247 247 '''Return the repo-relative path to the standin for the specified big
248 248 file.'''
249 249 # Notes:
250 250 # 1) Some callers want an absolute path, but for instance addlargefiles
251 251 # needs it repo-relative so it can be passed to repo[None].add(). So
252 252 # leave it up to the caller to use repo.wjoin() to get an absolute path.
253 253 # 2) Join with '/' because that's what dirstate always uses, even on
254 254 # Windows. Change existing separator to '/' first in case we are
255 255 # passed filenames from an external source (like the command line).
256 256 return shortnameslash + util.pconvert(filename)
257 257
258 258 def isstandin(filename):
259 259 '''Return true if filename is a big file standin. filename must be
260 260 in Mercurial's internal form (slash-separated).'''
261 261 return filename.startswith(shortnameslash)
262 262
263 263 def splitstandin(filename):
264 264 # Split on / because that's what dirstate always uses, even on Windows.
265 265 # Change local separator to / first just in case we are passed filenames
266 266 # from an external source (like the command line).
267 267 bits = util.pconvert(filename).split('/', 1)
268 268 if len(bits) == 2 and bits[0] == shortname:
269 269 return bits[1]
270 270 else:
271 271 return None
272 272
273 273 def updatestandin(repo, standin):
274 274 file = repo.wjoin(splitstandin(standin))
275 275 if os.path.exists(file):
276 276 hash = hashfile(file)
277 277 executable = getexecutable(file)
278 278 writestandin(repo, standin, hash, executable)
279 279
280 280 def readstandin(repo, filename, node=None):
281 281 '''read hex hash from standin for filename at given node, or working
282 282 directory if no node is given'''
283 283 return repo[node][standin(filename)].data().strip()
284 284
285 285 def writestandin(repo, standin, hash, executable):
286 286 '''write hash to <repo.root>/<standin>'''
287 287 repo.wwrite(standin, hash + '\n', executable and 'x' or '')
288 288
289 289 def copyandhash(instream, outfile):
290 290 '''Read bytes from instream (iterable) and write them to outfile,
291 291 computing the SHA-1 hash of the data along the way. Return the hash.'''
292 292 hasher = util.sha1('')
293 293 for data in instream:
294 294 hasher.update(data)
295 295 outfile.write(data)
296 296 return hasher.hexdigest()
297 297
298 298 def hashrepofile(repo, file):
299 299 return hashfile(repo.wjoin(file))
300 300
301 301 def hashfile(file):
302 302 if not os.path.exists(file):
303 303 return ''
304 304 hasher = util.sha1('')
305 305 fd = open(file, 'rb')
306 306 for data in util.filechunkiter(fd, 128 * 1024):
307 307 hasher.update(data)
308 308 fd.close()
309 309 return hasher.hexdigest()
310 310
311 311 def getexecutable(filename):
312 312 mode = os.stat(filename).st_mode
313 313 return ((mode & stat.S_IXUSR) and
314 314 (mode & stat.S_IXGRP) and
315 315 (mode & stat.S_IXOTH))
316 316
317 317 def urljoin(first, second, *arg):
318 318 def join(left, right):
319 319 if not left.endswith('/'):
320 320 left += '/'
321 321 if right.startswith('/'):
322 322 right = right[1:]
323 323 return left + right
324 324
325 325 url = join(first, second)
326 326 for a in arg:
327 327 url = join(url, a)
328 328 return url
329 329
330 330 def hexsha1(data):
331 331 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
332 332 object data"""
333 333 h = util.sha1()
334 334 for chunk in util.filechunkiter(data):
335 335 h.update(chunk)
336 336 return h.hexdigest()
337 337
338 338 def httpsendfile(ui, filename):
339 339 return httpconnection.httpsendfile(ui, filename, 'rb')
340 340
341 341 def unixpath(path):
342 342 '''Return a version of path normalized for use with the lfdirstate.'''
343 343 return util.pconvert(os.path.normpath(path))
344 344
345 345 def islfilesrepo(repo):
346 346 if ('largefiles' in repo.requirements and
347 347 util.any(shortnameslash in f[0] for f in repo.store.datafiles())):
348 348 return True
349 349
350 350 return util.any(openlfdirstate(repo.ui, repo, False))
351 351
352 352 class storeprotonotcapable(Exception):
353 353 def __init__(self, storetypes):
354 354 self.storetypes = storetypes
355 355
356 356 def getstandinsstate(repo):
357 357 standins = []
358 358 matcher = getstandinmatcher(repo)
359 359 for standin in repo.dirstate.walk(matcher, [], False, False):
360 360 lfile = splitstandin(standin)
361 361 try:
362 362 hash = readstandin(repo, lfile)
363 363 except IOError:
364 364 hash = None
365 365 standins.append((lfile, hash))
366 366 return standins
367 367
368 368 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
369 369 lfstandin = standin(lfile)
370 370 if lfstandin in repo.dirstate:
371 371 stat = repo.dirstate._map[lfstandin]
372 372 state, mtime = stat[0], stat[3]
373 373 else:
374 374 state, mtime = '?', -1
375 375 if state == 'n':
376 376 if normallookup or mtime < 0:
377 377 # state 'n' doesn't ensure 'clean' in this case
378 378 lfdirstate.normallookup(lfile)
379 379 else:
380 380 lfdirstate.normal(lfile)
381 381 elif state == 'm':
382 382 lfdirstate.normallookup(lfile)
383 383 elif state == 'r':
384 384 lfdirstate.remove(lfile)
385 385 elif state == 'a':
386 386 lfdirstate.add(lfile)
387 387 elif state == '?':
388 388 lfdirstate.drop(lfile)
389 389
390 390 def markcommitted(orig, ctx, node):
391 391 repo = ctx._repo
392 392
393 393 orig(node)
394 394
395 395 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
396 396 # because files coming from the 2nd parent are omitted in the latter.
397 397 #
398 398 # The former should be used to get targets of "synclfdirstate",
399 399 # because such files:
400 400 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
401 401 # - have to be marked as "n" after commit, but
402 402 # - aren't listed in "repo[node].files()"
403 403
404 404 lfdirstate = openlfdirstate(repo.ui, repo)
405 405 for f in ctx.files():
406 406 if isstandin(f):
407 407 lfile = splitstandin(f)
408 408 synclfdirstate(repo, lfdirstate, lfile, False)
409 409 lfdirstate.write()
410 410
411 # As part of committing, copy all of the largefiles into the cache.
412 copyalltostore(repo, node)
413
411 414 def getlfilestoupdate(oldstandins, newstandins):
412 415 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
413 416 filelist = []
414 417 for f in changedstandins:
415 418 if f[0] not in filelist:
416 419 filelist.append(f[0])
417 420 return filelist
418 421
419 422 def getlfilestoupload(repo, missing, addfunc):
420 423 for n in missing:
421 424 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
422 425 ctx = repo[n]
423 426 files = set(ctx.files())
424 427 if len(parents) == 2:
425 428 mc = ctx.manifest()
426 429 mp1 = ctx.parents()[0].manifest()
427 430 mp2 = ctx.parents()[1].manifest()
428 431 for f in mp1:
429 432 if f not in mc:
430 433 files.add(f)
431 434 for f in mp2:
432 435 if f not in mc:
433 436 files.add(f)
434 437 for f in mc:
435 438 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
436 439 files.add(f)
437 440 for fn in files:
438 441 if isstandin(fn) and fn in ctx:
439 442 addfunc(fn, ctx[fn].data().strip())
440 443
441 444 def updatestandinsbymatch(repo, match):
442 445 '''Update standins in the working directory according to specified match
443 446
444 447 This returns (possibly modified) ``match`` object to be used for
445 448 subsequent commit process.
446 449 '''
447 450
448 451 ui = repo.ui
449 452
450 453 # Case 1: user calls commit with no specific files or
451 454 # include/exclude patterns: refresh and commit all files that
452 455 # are "dirty".
453 456 if match is None or match.always():
454 457 # Spend a bit of time here to get a list of files we know
455 458 # are modified so we can compare only against those.
456 459 # It can cost a lot of time (several seconds)
457 460 # otherwise to update all standins if the largefiles are
458 461 # large.
459 462 lfdirstate = openlfdirstate(ui, repo)
460 463 dirtymatch = match_.always(repo.root, repo.getcwd())
461 464 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
462 465 False)
463 466 modifiedfiles = unsure + s.modified + s.added + s.removed
464 467 lfiles = listlfiles(repo)
465 468 # this only loops through largefiles that exist (not
466 469 # removed/renamed)
467 470 for lfile in lfiles:
468 471 if lfile in modifiedfiles:
469 472 if os.path.exists(
470 473 repo.wjoin(standin(lfile))):
471 474 # this handles the case where a rebase is being
472 475 # performed and the working copy is not updated
473 476 # yet.
474 477 if os.path.exists(repo.wjoin(lfile)):
475 478 updatestandin(repo,
476 479 standin(lfile))
477 480
478 481 return match
479 482
480 483 lfiles = listlfiles(repo)
481 484 match._files = repo._subdirlfs(match.files(), lfiles)
482 485
483 486 # Case 2: user calls commit with specified patterns: refresh
484 487 # any matching big files.
485 488 smatcher = composestandinmatcher(repo, match)
486 489 standins = repo.dirstate.walk(smatcher, [], False, False)
487 490
488 491 # No matching big files: get out of the way and pass control to
489 492 # the usual commit() method.
490 493 if not standins:
491 494 return match
492 495
493 496 # Refresh all matching big files. It's possible that the
494 497 # commit will end up failing, in which case the big files will
495 498 # stay refreshed. No harm done: the user modified them and
496 499 # asked to commit them, so sooner or later we're going to
497 500 # refresh the standins. Might as well leave them refreshed.
498 501 lfdirstate = openlfdirstate(ui, repo)
499 502 for fstandin in standins:
500 503 lfile = splitstandin(fstandin)
501 504 if lfdirstate[lfile] != 'r':
502 505 updatestandin(repo, fstandin)
503 506
504 507 # Cook up a new matcher that only matches regular files or
505 508 # standins corresponding to the big files requested by the
506 509 # user. Have to modify _files to prevent commit() from
507 510 # complaining "not tracked" for big files.
508 511 match = copy.copy(match)
509 512 origmatchfn = match.matchfn
510 513
511 514 # Check both the list of largefiles and the list of
512 515 # standins because if a largefile was removed, it
513 516 # won't be in the list of largefiles at this point
514 517 match._files += sorted(standins)
515 518
516 519 actualfiles = []
517 520 for f in match._files:
518 521 fstandin = standin(f)
519 522
520 523 # ignore known largefiles and standins
521 524 if f in lfiles or fstandin in standins:
522 525 continue
523 526
524 527 actualfiles.append(f)
525 528 match._files = actualfiles
526 529
527 530 def matchfn(f):
528 531 if origmatchfn(f):
529 532 return f not in lfiles
530 533 else:
531 534 return f in standins
532 535
533 536 match.matchfn = matchfn
534 537
535 538 return match
536 539
537 540 class automatedcommithook(object):
538 541 '''Stateful hook to update standins at the 1st commit of resuming
539 542
540 543 For efficiency, updating standins in the working directory should
541 544 be avoided while automated committing (like rebase, transplant and
542 545 so on), because they should be updated before committing.
543 546
544 547 But the 1st commit of resuming automated committing (e.g. ``rebase
545 548 --continue``) should update them, because largefiles may be
546 549 modified manually.
547 550 '''
548 551 def __init__(self, resuming):
549 552 self.resuming = resuming
550 553
551 554 def __call__(self, repo, match):
552 555 if self.resuming:
553 556 self.resuming = False # avoids updating at subsequent commits
554 557 return updatestandinsbymatch(repo, match)
555 558 else:
556 559 return match
557 560
558 561 def getstatuswriter(ui, repo, forcibly=None):
559 562 '''Return the function to write largefiles specific status out
560 563
561 564 If ``forcibly`` is ``None``, this returns the last element of
562 565 ``repo._lfupdatereporters`` as "default" writer function.
563 566
564 567 Otherwise, this returns the function to always write out (or
565 568 ignore if ``not forcibly``) status.
566 569 '''
567 570 if forcibly is None:
568 571 return repo._lfstatuswriters[-1]
569 572 else:
570 573 if forcibly:
571 574 return ui.status # forcibly WRITE OUT
572 575 else:
573 576 return lambda *msg, **opts: None # forcibly IGNORE
@@ -1,1295 +1,1287 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 15 archival, pathutil, revset
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18
19 19 import lfutil
20 20 import lfcommands
21 21 import basestore
22 22
23 23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 24
25 25 def installnormalfilesmatchfn(manifest):
26 26 '''installmatchfn with a matchfn that ignores all largefiles'''
27 27 def overridematch(ctx, pats=[], opts={}, globbed=False,
28 28 default='relpath'):
29 29 match = oldmatch(ctx, pats, opts, globbed, default)
30 30 m = copy.copy(match)
31 31 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
32 32 manifest)
33 33 m._files = filter(notlfile, m._files)
34 34 m._fmap = set(m._files)
35 35 m._always = False
36 36 origmatchfn = m.matchfn
37 37 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
38 38 return m
39 39 oldmatch = installmatchfn(overridematch)
40 40
41 41 def installmatchfn(f):
42 42 '''monkey patch the scmutil module with a custom match function.
43 43 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
44 44 oldmatch = scmutil.match
45 45 setattr(f, 'oldmatch', oldmatch)
46 46 scmutil.match = f
47 47 return oldmatch
48 48
49 49 def restorematchfn():
50 50 '''restores scmutil.match to what it was before installmatchfn
51 51 was called. no-op if scmutil.match is its original function.
52 52
53 53 Note that n calls to installmatchfn will require n calls to
54 54 restore matchfn to reverse'''
55 55 scmutil.match = getattr(scmutil.match, 'oldmatch')
56 56
57 57 def installmatchandpatsfn(f):
58 58 oldmatchandpats = scmutil.matchandpats
59 59 setattr(f, 'oldmatchandpats', oldmatchandpats)
60 60 scmutil.matchandpats = f
61 61 return oldmatchandpats
62 62
63 63 def restorematchandpatsfn():
64 64 '''restores scmutil.matchandpats to what it was before
65 65 installmatchandpatsfn was called. No-op if scmutil.matchandpats
66 66 is its original function.
67 67
68 68 Note that n calls to installmatchandpatsfn will require n calls
69 69 to restore matchfn to reverse'''
70 70 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
71 71 scmutil.matchandpats)
72 72
73 73 def addlargefiles(ui, repo, *pats, **opts):
74 74 large = opts.pop('large', None)
75 75 lfsize = lfutil.getminsize(
76 76 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
77 77
78 78 lfmatcher = None
79 79 if lfutil.islfilesrepo(repo):
80 80 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
81 81 if lfpats:
82 82 lfmatcher = match_.match(repo.root, '', list(lfpats))
83 83
84 84 lfnames = []
85 85 m = scmutil.match(repo[None], pats, opts)
86 86 m.bad = lambda x, y: None
87 87 wctx = repo[None]
88 88 for f in repo.walk(m):
89 89 exact = m.exact(f)
90 90 lfile = lfutil.standin(f) in wctx
91 91 nfile = f in wctx
92 92 exists = lfile or nfile
93 93
94 94 # Don't warn the user when they attempt to add a normal tracked file.
95 95 # The normal add code will do that for us.
96 96 if exact and exists:
97 97 if lfile:
98 98 ui.warn(_('%s already a largefile\n') % f)
99 99 continue
100 100
101 101 if (exact or not exists) and not lfutil.isstandin(f):
102 102 wfile = repo.wjoin(f)
103 103
104 104 # In case the file was removed previously, but not committed
105 105 # (issue3507)
106 106 if not os.path.exists(wfile):
107 107 continue
108 108
109 109 abovemin = (lfsize and
110 110 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
111 111 if large or abovemin or (lfmatcher and lfmatcher(f)):
112 112 lfnames.append(f)
113 113 if ui.verbose or not exact:
114 114 ui.status(_('adding %s as a largefile\n') % m.rel(f))
115 115
116 116 bad = []
117 117
118 118 # Need to lock, otherwise there could be a race condition between
119 119 # when standins are created and added to the repo.
120 120 wlock = repo.wlock()
121 121 try:
122 122 if not opts.get('dry_run'):
123 123 standins = []
124 124 lfdirstate = lfutil.openlfdirstate(ui, repo)
125 125 for f in lfnames:
126 126 standinname = lfutil.standin(f)
127 127 lfutil.writestandin(repo, standinname, hash='',
128 128 executable=lfutil.getexecutable(repo.wjoin(f)))
129 129 standins.append(standinname)
130 130 if lfdirstate[f] == 'r':
131 131 lfdirstate.normallookup(f)
132 132 else:
133 133 lfdirstate.add(f)
134 134 lfdirstate.write()
135 135 bad += [lfutil.splitstandin(f)
136 136 for f in repo[None].add(standins)
137 137 if f in m.files()]
138 138 finally:
139 139 wlock.release()
140 140 return bad
141 141
142 142 def removelargefiles(ui, repo, isaddremove, *pats, **opts):
143 143 after = opts.get('after')
144 144 if not pats and not after:
145 145 raise util.Abort(_('no files specified'))
146 146 m = scmutil.match(repo[None], pats, opts)
147 147 try:
148 148 repo.lfstatus = True
149 149 s = repo.status(match=m, clean=True)
150 150 finally:
151 151 repo.lfstatus = False
152 152 manifest = repo[None].manifest()
153 153 modified, added, deleted, clean = [[f for f in list
154 154 if lfutil.standin(f) in manifest]
155 155 for list in (s.modified, s.added,
156 156 s.deleted, s.clean)]
157 157
158 158 def warn(files, msg):
159 159 for f in files:
160 160 ui.warn(msg % m.rel(f))
161 161 return int(len(files) > 0)
162 162
163 163 result = 0
164 164
165 165 if after:
166 166 remove = deleted
167 167 result = warn(modified + added + clean,
168 168 _('not removing %s: file still exists\n'))
169 169 else:
170 170 remove = deleted + clean
171 171 result = warn(modified, _('not removing %s: file is modified (use -f'
172 172 ' to force removal)\n'))
173 173 result = warn(added, _('not removing %s: file has been marked for add'
174 174 ' (use forget to undo)\n')) or result
175 175
176 176 for f in sorted(remove):
177 177 if ui.verbose or not m.exact(f):
178 178 ui.status(_('removing %s\n') % m.rel(f))
179 179
180 180 # Need to lock because standin files are deleted then removed from the
181 181 # repository and we could race in-between.
182 182 wlock = repo.wlock()
183 183 try:
184 184 lfdirstate = lfutil.openlfdirstate(ui, repo)
185 185 for f in remove:
186 186 if not after:
187 187 # If this is being called by addremove, notify the user that we
188 188 # are removing the file.
189 189 if isaddremove:
190 190 ui.status(_('removing %s\n') % f)
191 191 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
192 192 lfdirstate.remove(f)
193 193 lfdirstate.write()
194 194 remove = [lfutil.standin(f) for f in remove]
195 195 # If this is being called by addremove, let the original addremove
196 196 # function handle this.
197 197 if not isaddremove:
198 198 for f in remove:
199 199 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
200 200 repo[None].forget(remove)
201 201 finally:
202 202 wlock.release()
203 203
204 204 return result
205 205
206 206 # For overriding mercurial.hgweb.webcommands so that largefiles will
207 207 # appear at their right place in the manifests.
208 208 def decodepath(orig, path):
209 209 return lfutil.splitstandin(path) or path
210 210
211 211 # -- Wrappers: modify existing commands --------------------------------
212 212
213 213 # Add works by going through the files that the user wanted to add and
214 214 # checking if they should be added as largefiles. Then it makes a new
215 215 # matcher which matches only the normal files and runs the original
216 216 # version of add.
217 217 def overrideadd(orig, ui, repo, *pats, **opts):
218 218 normal = opts.pop('normal')
219 219 if normal:
220 220 if opts.get('large'):
221 221 raise util.Abort(_('--normal cannot be used with --large'))
222 222 return orig(ui, repo, *pats, **opts)
223 223 bad = addlargefiles(ui, repo, *pats, **opts)
224 224 installnormalfilesmatchfn(repo[None].manifest())
225 225 result = orig(ui, repo, *pats, **opts)
226 226 restorematchfn()
227 227
228 228 return (result == 1 or bad) and 1 or 0
229 229
230 230 def overrideremove(orig, ui, repo, *pats, **opts):
231 231 installnormalfilesmatchfn(repo[None].manifest())
232 232 result = orig(ui, repo, *pats, **opts)
233 233 restorematchfn()
234 234 return removelargefiles(ui, repo, False, *pats, **opts) or result
235 235
236 236 def overridestatusfn(orig, repo, rev2, **opts):
237 237 try:
238 238 repo._repo.lfstatus = True
239 239 return orig(repo, rev2, **opts)
240 240 finally:
241 241 repo._repo.lfstatus = False
242 242
243 243 def overridestatus(orig, ui, repo, *pats, **opts):
244 244 try:
245 245 repo.lfstatus = True
246 246 return orig(ui, repo, *pats, **opts)
247 247 finally:
248 248 repo.lfstatus = False
249 249
250 250 def overridedirty(orig, repo, ignoreupdate=False):
251 251 try:
252 252 repo._repo.lfstatus = True
253 253 return orig(repo, ignoreupdate)
254 254 finally:
255 255 repo._repo.lfstatus = False
256 256
257 257 def overridelog(orig, ui, repo, *pats, **opts):
258 258 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
259 259 default='relpath'):
260 260 """Matcher that merges root directory with .hglf, suitable for log.
261 261 It is still possible to match .hglf directly.
262 262 For any listed files run log on the standin too.
263 263 matchfn tries both the given filename and with .hglf stripped.
264 264 """
265 265 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
266 266 m, p = copy.copy(matchandpats)
267 267
268 268 if m.always():
269 269 # We want to match everything anyway, so there's no benefit trying
270 270 # to add standins.
271 271 return matchandpats
272 272
273 273 pats = set(p)
274 274 # TODO: handling of patterns in both cases below
275 275 if m._cwd:
276 276 if os.path.isabs(m._cwd):
277 277 # TODO: handle largefile magic when invoked from other cwd
278 278 return matchandpats
279 279 back = (m._cwd.count('/') + 1) * '../'
280 280 pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
281 281 else:
282 282 pats.update(lfutil.standin(f) for f in p)
283 283
284 284 for i in range(0, len(m._files)):
285 285 standin = lfutil.standin(m._files[i])
286 286 if standin in repo[ctx.node()]:
287 287 m._files[i] = standin
288 288 elif m._files[i] not in repo[ctx.node()]:
289 289 m._files.append(standin)
290 290 pats.add(standin)
291 291
292 292 m._fmap = set(m._files)
293 293 m._always = False
294 294 origmatchfn = m.matchfn
295 295 def lfmatchfn(f):
296 296 lf = lfutil.splitstandin(f)
297 297 if lf is not None and origmatchfn(lf):
298 298 return True
299 299 r = origmatchfn(f)
300 300 return r
301 301 m.matchfn = lfmatchfn
302 302
303 303 return m, pats
304 304
305 305 # For hg log --patch, the match object is used in two different senses:
306 306 # (1) to determine what revisions should be printed out, and
307 307 # (2) to determine what files to print out diffs for.
308 308 # The magic matchandpats override should be used for case (1) but not for
309 309 # case (2).
310 310 def overridemakelogfilematcher(repo, pats, opts):
311 311 pctx = repo[None]
312 312 match, pats = oldmatchandpats(pctx, pats, opts)
313 313 return lambda rev: match
314 314
315 315 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
316 316 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
317 317 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
318 318
319 319 try:
320 320 return orig(ui, repo, *pats, **opts)
321 321 finally:
322 322 restorematchandpatsfn()
323 323 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
324 324
325 325 def overrideverify(orig, ui, repo, *pats, **opts):
326 326 large = opts.pop('large', False)
327 327 all = opts.pop('lfa', False)
328 328 contents = opts.pop('lfc', False)
329 329
330 330 result = orig(ui, repo, *pats, **opts)
331 331 if large or all or contents:
332 332 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
333 333 return result
334 334
335 335 def overridedebugstate(orig, ui, repo, *pats, **opts):
336 336 large = opts.pop('large', False)
337 337 if large:
338 338 class fakerepo(object):
339 339 dirstate = lfutil.openlfdirstate(ui, repo)
340 340 orig(ui, fakerepo, *pats, **opts)
341 341 else:
342 342 orig(ui, repo, *pats, **opts)
343 343
344 344 # Override needs to refresh standins so that update's normal merge
345 345 # will go through properly. Then the other update hook (overriding repo.update)
346 346 # will get the new files. Filemerge is also overridden so that the merge
347 347 # will merge standins correctly.
348 348 def overrideupdate(orig, ui, repo, *pats, **opts):
349 349 # Need to lock between the standins getting updated and their
350 350 # largefiles getting updated
351 351 wlock = repo.wlock()
352 352 try:
353 353 if opts['check']:
354 354 lfdirstate = lfutil.openlfdirstate(ui, repo)
355 355 unsure, s = lfdirstate.status(
356 356 match_.always(repo.root, repo.getcwd()),
357 357 [], False, False, False)
358 358
359 359 mod = len(s.modified) > 0
360 360 for lfile in unsure:
361 361 standin = lfutil.standin(lfile)
362 362 if repo['.'][standin].data().strip() != \
363 363 lfutil.hashfile(repo.wjoin(lfile)):
364 364 mod = True
365 365 else:
366 366 lfdirstate.normal(lfile)
367 367 lfdirstate.write()
368 368 if mod:
369 369 raise util.Abort(_('uncommitted changes'))
370 370 return orig(ui, repo, *pats, **opts)
371 371 finally:
372 372 wlock.release()
373 373
374 374 # Before starting the manifest merge, merge.updates will call
375 375 # _checkunknown to check if there are any files in the merged-in
376 376 # changeset that collide with unknown files in the working copy.
377 377 #
378 378 # The largefiles are seen as unknown, so this prevents us from merging
379 379 # in a file 'foo' if we already have a largefile with the same name.
380 380 #
381 381 # The overridden function filters the unknown files by removing any
382 382 # largefiles. This makes the merge proceed and we can then handle this
383 383 # case further in the overridden manifestmerge function below.
384 384 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
385 385 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
386 386 return False
387 387 return origfn(repo, wctx, mctx, f)
388 388
389 389 # The manifest merge handles conflicts on the manifest level. We want
390 390 # to handle changes in largefile-ness of files at this level too.
391 391 #
392 392 # The strategy is to run the original manifestmerge and then process
393 393 # the action list it outputs. There are two cases we need to deal with:
394 394 #
395 395 # 1. Normal file in p1, largefile in p2. Here the largefile is
396 396 # detected via its standin file, which will enter the working copy
397 397 # with a "get" action. It is not "merge" since the standin is all
398 398 # Mercurial is concerned with at this level -- the link to the
399 399 # existing normal file is not relevant here.
400 400 #
401 401 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
402 402 # since the largefile will be present in the working copy and
403 403 # different from the normal file in p2. Mercurial therefore
404 404 # triggers a merge action.
405 405 #
406 406 # In both cases, we prompt the user and emit new actions to either
407 407 # remove the standin (if the normal file was kept) or to remove the
408 408 # normal file and get the standin (if the largefile was kept). The
409 409 # default prompt answer is to use the largefile version since it was
410 410 # presumably changed on purpose.
411 411 #
412 412 # Finally, the merge.applyupdates function will then take care of
413 413 # writing the files into the working copy and lfcommands.updatelfiles
414 414 # will update the largefiles.
415 415 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
416 416 partial, acceptremote, followcopies):
417 417 overwrite = force and not branchmerge
418 418 actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
419 419 acceptremote, followcopies)
420 420
421 421 if overwrite:
422 422 return actions
423 423
424 424 removes = set(a[0] for a in actions['r'])
425 425
426 426 newglist = []
427 427 lfmr = [] # LargeFiles: Mark as Removed
428 428 for action in actions['g']:
429 429 f, args, msg = action
430 430 splitstandin = f and lfutil.splitstandin(f)
431 431 if (splitstandin is not None and
432 432 splitstandin in p1 and splitstandin not in removes):
433 433 # Case 1: normal file in the working copy, largefile in
434 434 # the second parent
435 435 lfile = splitstandin
436 436 standin = f
437 437 msg = _('remote turned local normal file %s into a largefile\n'
438 438 'use (l)argefile or keep (n)ormal file?'
439 439 '$$ &Largefile $$ &Normal file') % lfile
440 440 if repo.ui.promptchoice(msg, 0) == 0:
441 441 actions['r'].append((lfile, None, msg))
442 442 newglist.append((standin, (p2.flags(standin),), msg))
443 443 else:
444 444 actions['r'].append((standin, None, msg))
445 445 elif lfutil.standin(f) in p1 and lfutil.standin(f) not in removes:
446 446 # Case 2: largefile in the working copy, normal file in
447 447 # the second parent
448 448 standin = lfutil.standin(f)
449 449 lfile = f
450 450 msg = _('remote turned local largefile %s into a normal file\n'
451 451 'keep (l)argefile or use (n)ormal file?'
452 452 '$$ &Largefile $$ &Normal file') % lfile
453 453 if repo.ui.promptchoice(msg, 0) == 0:
454 454 if branchmerge:
455 455 # largefile can be restored from standin safely
456 456 actions['r'].append((lfile, None, msg))
457 457 else:
458 458 # "lfile" should be marked as "removed" without
459 459 # removal of itself
460 460 lfmr.append((lfile, None, msg))
461 461
462 462 # linear-merge should treat this largefile as 're-added'
463 463 actions['a'].append((standin, None, msg))
464 464 else:
465 465 actions['r'].append((standin, None, msg))
466 466 newglist.append((lfile, (p2.flags(lfile),), msg))
467 467 else:
468 468 newglist.append(action)
469 469
470 470 newglist.sort()
471 471 actions['g'] = newglist
472 472 if lfmr:
473 473 lfmr.sort()
474 474 actions['lfmr'] = lfmr
475 475
476 476 return actions
477 477
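# A stand-alone sketch of the case 1 rewrite described in the comment above
# overridecalculateupdates, using the same dict-of-lists action format (the
# helper and file names are hypothetical; the real code inspects p2 and
# prompts through repo.ui.promptchoice):
def _sketch_resolvecase1(actions, lfile, keeplargefile, flags='', msg=''):
    standin = '.hglf/' + lfile
    if keeplargefile:
        actions['r'].append((lfile, None, msg))        # drop the normal file
        actions['g'].append((standin, (flags,), msg))  # and fetch the standin
    else:
        actions['r'].append((standin, None, msg))      # keep normal, drop standin
    return actions

_sketch_actions = {'g': [], 'r': [], 'a': []}
_sketch_resolvecase1(_sketch_actions, 'big.bin', keeplargefile=True)
assert _sketch_actions['r'] == [('big.bin', None, '')]
assert _sketch_actions['g'] == [('.hglf/big.bin', ('',), '')]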
478 478 def mergerecordupdates(orig, repo, actions, branchmerge):
479 479 if 'lfmr' in actions:
480 480 # this should be executed before 'orig', to execute 'remove'
481 481 # before all other actions
482 482 for lfile, args, msg in actions['lfmr']:
483 483 repo.dirstate.remove(lfile)
484 484
485 485 return orig(repo, actions, branchmerge)
486 486
487 487
488 488 # Override filemerge to prompt the user about how they wish to merge
489 489 # largefiles. This will handle identical edits without prompting the user.
490 490 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
491 491 if not lfutil.isstandin(orig):
492 492 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
493 493
494 494 ahash = fca.data().strip().lower()
495 495 dhash = fcd.data().strip().lower()
496 496 ohash = fco.data().strip().lower()
497 497 if (ohash != ahash and
498 498 ohash != dhash and
499 499 (dhash == ahash or
500 500 repo.ui.promptchoice(
501 501 _('largefile %s has a merge conflict\nancestor was %s\n'
502 502 'keep (l)ocal %s or\ntake (o)ther %s?'
503 503 '$$ &Local $$ &Other') %
504 504 (lfutil.splitstandin(orig), ahash, dhash, ohash),
505 505 0) == 1)):
506 506 repo.wwrite(fcd.path(), fco.data(), fco.flags())
507 507 return 0
508 508
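# The standins compared above contain only hashes, so the merge decision
# reduces to a three-way comparison. A stand-alone sketch with a hypothetical
# helper (the real code asks the user via repo.ui.promptchoice):
def _sketch_takeother(ahash, dhash, ohash, userpicksother=False):
    if ohash == ahash or ohash == dhash:
        return False                  # nothing new on the other side
    return dhash == ahash or userpicksother

assert _sketch_takeother('a', 'a', 'b') is True        # only other changed
assert _sketch_takeother('a', 'b', 'b') is False       # both changed identically
assert _sketch_takeother('a', 'b', 'c', True) is True  # real conflict, user chose other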
509 509 # Copy first changes the matchers to match standins instead of
510 510 # largefiles. Then it overrides util.copyfile so that the override can
511 511 # check whether the destination largefile already exists. It also keeps a
512 512 # list of copied files so that the largefiles can be copied and the
513 513 # dirstate updated.
514 514 def overridecopy(orig, ui, repo, pats, opts, rename=False):
515 515 # doesn't remove largefile on rename
516 516 if len(pats) < 2:
517 517 # this isn't legal, let the original function deal with it
518 518 return orig(ui, repo, pats, opts, rename)
519 519
520 520 def makestandin(relpath):
521 521 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
522 522 return os.path.join(repo.wjoin(lfutil.standin(path)))
523 523
524 524 fullpats = scmutil.expandpats(pats)
525 525 dest = fullpats[-1]
526 526
527 527 if os.path.isdir(dest):
528 528 if not os.path.isdir(makestandin(dest)):
529 529 os.makedirs(makestandin(dest))
530 530 # This could copy both lfiles and normal files in one command,
531 531 # but we don't want to do that. First replace their matcher to
532 532 # only match normal files and run it, then replace it to just
533 533 # match largefiles and run it again.
534 534 nonormalfiles = False
535 535 nolfiles = False
536 536 installnormalfilesmatchfn(repo[None].manifest())
537 537 try:
538 538 try:
539 539 result = orig(ui, repo, pats, opts, rename)
540 540 except util.Abort, e:
541 541 if str(e) != _('no files to copy'):
542 542 raise e
543 543 else:
544 544 nonormalfiles = True
545 545 result = 0
546 546 finally:
547 547 restorematchfn()
548 548
549 549 # The first rename can cause our current working directory to be removed.
550 550 # In that case there is nothing left to copy/rename so just quit.
551 551 try:
552 552 repo.getcwd()
553 553 except OSError:
554 554 return result
555 555
556 556 try:
557 557 try:
558 558 # When we call orig below, it creates the standins, but we don't add
559 559 # them to the dirstate until later, so lock during that time.
560 560 wlock = repo.wlock()
561 561
562 562 manifest = repo[None].manifest()
563 563 def overridematch(ctx, pats=[], opts={}, globbed=False,
564 564 default='relpath'):
565 565 newpats = []
566 566 # The patterns were previously mangled to add the standin
567 567 # directory; we need to remove that now
568 568 for pat in pats:
569 569 if match_.patkind(pat) is None and lfutil.shortname in pat:
570 570 newpats.append(pat.replace(lfutil.shortname, ''))
571 571 else:
572 572 newpats.append(pat)
573 573 match = oldmatch(ctx, newpats, opts, globbed, default)
574 574 m = copy.copy(match)
575 575 lfile = lambda f: lfutil.standin(f) in manifest
576 576 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
577 577 m._fmap = set(m._files)
578 578 origmatchfn = m.matchfn
579 579 m.matchfn = lambda f: (lfutil.isstandin(f) and
580 580 (f in manifest) and
581 581 origmatchfn(lfutil.splitstandin(f)) or
582 582 None)
583 583 return m
584 584 oldmatch = installmatchfn(overridematch)
585 585 listpats = []
586 586 for pat in pats:
587 587 if match_.patkind(pat) is not None:
588 588 listpats.append(pat)
589 589 else:
590 590 listpats.append(makestandin(pat))
591 591
592 592 try:
593 593 origcopyfile = util.copyfile
594 594 copiedfiles = []
595 595 def overridecopyfile(src, dest):
596 596 if (lfutil.shortname in src and
597 597 dest.startswith(repo.wjoin(lfutil.shortname))):
598 598 destlfile = dest.replace(lfutil.shortname, '')
599 599 if not opts['force'] and os.path.exists(destlfile):
600 600 raise IOError('',
601 601 _('destination largefile already exists'))
602 602 copiedfiles.append((src, dest))
603 603 origcopyfile(src, dest)
604 604
605 605 util.copyfile = overridecopyfile
606 606 result += orig(ui, repo, listpats, opts, rename)
607 607 finally:
608 608 util.copyfile = origcopyfile
609 609
610 610 lfdirstate = lfutil.openlfdirstate(ui, repo)
611 611 for (src, dest) in copiedfiles:
612 612 if (lfutil.shortname in src and
613 613 dest.startswith(repo.wjoin(lfutil.shortname))):
614 614 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
615 615 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
616 616 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
617 617 if not os.path.isdir(destlfiledir):
618 618 os.makedirs(destlfiledir)
619 619 if rename:
620 620 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
621 621
622 622 # The file is gone, but this deletes any empty parent
623 623 # directories as a side-effect.
624 624 util.unlinkpath(repo.wjoin(srclfile), True)
625 625 lfdirstate.remove(srclfile)
626 626 else:
627 627 util.copyfile(repo.wjoin(srclfile),
628 628 repo.wjoin(destlfile))
629 629
630 630 lfdirstate.add(destlfile)
631 631 lfdirstate.write()
632 632 except util.Abort, e:
633 633 if str(e) != _('no files to copy'):
634 634 raise e
635 635 else:
636 636 nolfiles = True
637 637 finally:
638 638 restorematchfn()
639 639 wlock.release()
640 640
641 641 if nolfiles and nonormalfiles:
642 642 raise util.Abort(_('no files to copy'))
643 643
644 644 return result
645 645
646 646 # When the user calls revert, we have to be careful to not revert any
647 647 # changes to other largefiles accidentally. This means we have to keep
648 648 # track of the largefiles that are being reverted so we only pull down
649 649 # the necessary largefiles.
650 650 #
651 651 # Standins are only updated (to match the hash of largefiles) before
652 652 # commits. Update the standins, then run the original revert, changing
653 653 # the matcher to hit standins instead of largefiles. Based on the
654 654 # resulting standins, update the largefiles; a sketch follows the function.
655 655 def overriderevert(orig, ui, repo, *pats, **opts):
656 656 # Because we put the standins in a bad state (by updating them)
657 657 # and then return them to a correct state we need to lock to
658 658 # prevent others from changing them in their incorrect state.
659 659 wlock = repo.wlock()
660 660 try:
661 661 lfdirstate = lfutil.openlfdirstate(ui, repo)
662 662 s = lfutil.lfdirstatestatus(lfdirstate, repo)
663 663 lfdirstate.write()
664 664 for lfile in s.modified:
665 665 lfutil.updatestandin(repo, lfutil.standin(lfile))
666 666 for lfile in s.deleted:
667 667 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
668 668 os.unlink(repo.wjoin(lfutil.standin(lfile)))
669 669
670 670 oldstandins = lfutil.getstandinsstate(repo)
671 671
672 672 def overridematch(ctx, pats=[], opts={}, globbed=False,
673 673 default='relpath'):
674 674 match = oldmatch(ctx, pats, opts, globbed, default)
675 675 m = copy.copy(match)
676 676 def tostandin(f):
677 677 if lfutil.standin(f) in ctx:
678 678 return lfutil.standin(f)
679 679 elif lfutil.standin(f) in repo[None]:
680 680 return None
681 681 return f
682 682 m._files = [tostandin(f) for f in m._files]
683 683 m._files = [f for f in m._files if f is not None]
684 684 m._fmap = set(m._files)
685 685 origmatchfn = m.matchfn
686 686 def matchfn(f):
687 687 if lfutil.isstandin(f):
688 688 return (origmatchfn(lfutil.splitstandin(f)) and
689 689 (f in repo[None] or f in ctx))
690 690 return origmatchfn(f)
691 691 m.matchfn = matchfn
692 692 return m
693 693 oldmatch = installmatchfn(overridematch)
694 694 try:
695 695 orig(ui, repo, *pats, **opts)
696 696 finally:
697 697 restorematchfn()
698 698
699 699 newstandins = lfutil.getstandinsstate(repo)
700 700 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
701 701 # lfdirstate should be 'normallookup'-ed for updated files,
702 702 # because reverting doesn't touch the dirstate for 'normal' files
703 703 # when a target revision is explicitly specified: in that case, an
704 704 # 'n' state and a valid timestamp in the dirstate don't guarantee
705 705 # that the target (standin) file is clean.
706 706 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
707 707 normallookup=True)
708 708
709 709 finally:
710 710 wlock.release()
711 711
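# The sketch referenced in the comment above overriderevert: only largefiles
# whose standin content changed during the revert are refreshed afterwards.
# Stand-alone illustration with hypothetical state tuples (the real helpers
# are lfutil.getstandinsstate and lfutil.getlfilestoupdate):
def _sketch_lfilestoupdate(oldstate, newstate):
    old = dict(oldstate)
    return [f for f, h in newstate if old.get(f) != h]

_sketch_old = [('big.bin', 'hash1'), ('huge.dat', 'hash2')]
_sketch_new = [('big.bin', 'hash1'), ('huge.dat', 'hash3')]
assert _sketch_lfilestoupdate(_sketch_old, _sketch_new) == ['huge.dat']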
712 712 # after pulling changesets, we need to take some extra care to get
713 713 # largefiles updated remotely
714 714 def overridepull(orig, ui, repo, source=None, **opts):
715 715 revsprepull = len(repo)
716 716 if not source:
717 717 source = 'default'
718 718 repo.lfpullsource = source
719 719 result = orig(ui, repo, source, **opts)
720 720 revspostpull = len(repo)
721 721 lfrevs = opts.get('lfrev', [])
722 722 if opts.get('all_largefiles'):
723 723 lfrevs.append('pulled()')
724 724 if lfrevs and revspostpull > revsprepull:
725 725 numcached = 0
726 726 repo.firstpulled = revsprepull # for pulled() revset expression
727 727 try:
728 728 for rev in scmutil.revrange(repo, lfrevs):
729 729 ui.note(_('pulling largefiles for revision %s\n') % rev)
730 730 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
731 731 numcached += len(cached)
732 732 finally:
733 733 del repo.firstpulled
734 734 ui.status(_("%d largefiles cached\n") % numcached)
735 735 return result
736 736
737 737 def pulledrevsetsymbol(repo, subset, x):
738 738 """``pulled()``
739 739 Changesets that have just been pulled.
740 740
741 741 Only available with largefiles from pull --lfrev expressions.
742 742
743 743 .. container:: verbose
744 744
745 745 Some examples:
746 746
747 747 - pull largefiles for all new changesets::
748 748
749 749 hg pull --lfrev "pulled()"
750 750
751 751 - pull largefiles for all new branch heads::
752 752
753 753 hg pull --lfrev "head(pulled()) and not closed()"
754 754
755 755 """
756 756
757 757 try:
758 758 firstpulled = repo.firstpulled
759 759 except AttributeError:
760 760 raise util.Abort(_("pulled() only available in --lfrev"))
761 761 return revset.baseset([r for r in subset if r >= firstpulled])
762 762
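# A stand-alone sketch of the filter above, with hypothetical revision
# numbers: repo.firstpulled is the repository length before the pull, so every
# revision at or after it was just pulled.
def _sketch_pulled(subset, firstpulled):
    return [r for r in subset if r >= firstpulled]

assert _sketch_pulled(range(0, 8), 5) == [5, 6, 7]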
763 763 def overrideclone(orig, ui, source, dest=None, **opts):
764 764 d = dest
765 765 if d is None:
766 766 d = hg.defaultdest(source)
767 767 if opts.get('all_largefiles') and not hg.islocal(d):
768 768 raise util.Abort(_(
769 769 '--all-largefiles is incompatible with non-local destination %s') %
770 770 d)
771 771
772 772 return orig(ui, source, dest, **opts)
773 773
774 774 def hgclone(orig, ui, opts, *args, **kwargs):
775 775 result = orig(ui, opts, *args, **kwargs)
776 776
777 777 if result is not None:
778 778 sourcerepo, destrepo = result
779 779 repo = destrepo.local()
780 780
781 781 # Caching is implicitly limited to the 'rev' option, since the dest repo was
782 782 # truncated at that point. The user may expect a download count with
783 783 # this option, so attempt the download whether or not this is a largefile repo.
784 784 if opts.get('all_largefiles'):
785 785 success, missing = lfcommands.downloadlfiles(ui, repo, None)
786 786
787 787 if missing != 0:
788 788 return None
789 789
790 790 return result
791 791
792 792 def overriderebase(orig, ui, repo, **opts):
793 793 resuming = opts.get('continue')
794 794 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
795 795 repo._lfstatuswriters.append(lambda *msg, **opts: None)
796 796 try:
797 797 return orig(ui, repo, **opts)
798 798 finally:
799 799 repo._lfstatuswriters.pop()
800 800 repo._lfcommithooks.pop()
801 801
802 802 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
803 803 prefix=None, mtime=None, subrepos=None):
804 804 # No need to lock because we are only reading history and
805 805 # largefile caches, neither of which are modified.
806 806 lfcommands.cachelfiles(repo.ui, repo, node)
807 807
808 808 if kind not in archival.archivers:
809 809 raise util.Abort(_("unknown archive type '%s'") % kind)
810 810
811 811 ctx = repo[node]
812 812
813 813 if kind == 'files':
814 814 if prefix:
815 815 raise util.Abort(
816 816 _('cannot give prefix when archiving to files'))
817 817 else:
818 818 prefix = archival.tidyprefix(dest, kind, prefix)
819 819
820 820 def write(name, mode, islink, getdata):
821 821 if matchfn and not matchfn(name):
822 822 return
823 823 data = getdata()
824 824 if decode:
825 825 data = repo.wwritedata(name, data)
826 826 archiver.addfile(prefix + name, mode, islink, data)
827 827
828 828 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
829 829
830 830 if repo.ui.configbool("ui", "archivemeta", True):
831 831 def metadata():
832 832 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
833 833 hex(repo.changelog.node(0)), hex(node), ctx.branch())
834 834
835 835 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
836 836 if repo.tagtype(t) == 'global')
837 837 if not tags:
838 838 repo.ui.pushbuffer()
839 839 opts = {'template': '{latesttag}\n{latesttagdistance}',
840 840 'style': '', 'patch': None, 'git': None}
841 841 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
842 842 ltags, dist = repo.ui.popbuffer().split('\n')
843 843 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
844 844 tags += 'latesttagdistance: %s\n' % dist
845 845
846 846 return base + tags
847 847
848 848 write('.hg_archival.txt', 0644, False, metadata)
849 849
850 850 for f in ctx:
851 851 ff = ctx.flags(f)
852 852 getdata = ctx[f].data
853 853 if lfutil.isstandin(f):
854 854 path = lfutil.findfile(repo, getdata().strip())
855 855 if path is None:
856 856 raise util.Abort(
857 857 _('largefile %s not found in repo store or system cache')
858 858 % lfutil.splitstandin(f))
859 859 f = lfutil.splitstandin(f)
860 860
861 861 def getdatafn():
862 862 fd = None
863 863 try:
864 864 fd = open(path, 'rb')
865 865 return fd.read()
866 866 finally:
867 867 if fd:
868 868 fd.close()
869 869
870 870 getdata = getdatafn
871 871 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
872 872
873 873 if subrepos:
874 874 for subpath in sorted(ctx.substate):
875 875 sub = ctx.sub(subpath)
876 876 submatch = match_.narrowmatcher(subpath, matchfn)
877 877 sub.archive(repo.ui, archiver, prefix, submatch)
878 878
879 879 archiver.done()
880 880
881 881 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
882 882 repo._get(repo._state + ('hg',))
883 883 rev = repo._state[1]
884 884 ctx = repo._repo[rev]
885 885
886 886 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
887 887
888 888 def write(name, mode, islink, getdata):
889 889 # At this point, the standin has been replaced with the largefile name,
890 890 # so the normal matcher works here without the lfutil variants.
891 891 if match and not match(name):
892 892 return
893 893 data = getdata()
894 894
895 895 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
896 896
897 897 for f in ctx:
898 898 ff = ctx.flags(f)
899 899 getdata = ctx[f].data
900 900 if lfutil.isstandin(f):
901 901 path = lfutil.findfile(repo._repo, getdata().strip())
902 902 if path is None:
903 903 raise util.Abort(
904 904 _('largefile %s not found in repo store or system cache')
905 905 % lfutil.splitstandin(f))
906 906 f = lfutil.splitstandin(f)
907 907
908 908 def getdatafn():
909 909 fd = None
910 910 try:
911 911 fd = open(os.path.join(prefix, path), 'rb')
912 912 return fd.read()
913 913 finally:
914 914 if fd:
915 915 fd.close()
916 916
917 917 getdata = getdatafn
918 918
919 919 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
920 920
921 921 for subpath in sorted(ctx.substate):
922 922 sub = ctx.sub(subpath)
923 923 submatch = match_.narrowmatcher(subpath, match)
924 924 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
925 925 submatch)
926 926
927 927 # If a largefile is modified, the change is not reflected in its
928 928 # standin until a commit. cmdutil.bailifchanged() raises an exception
929 929 # if the repo has uncommitted changes. Wrap it to also check if
930 930 # largefiles were changed. This is used by bisect and backout.
931 931 def overridebailifchanged(orig, repo):
932 932 orig(repo)
933 933 repo.lfstatus = True
934 934 s = repo.status()
935 935 repo.lfstatus = False
936 936 if s.modified or s.added or s.removed or s.deleted:
937 937 raise util.Abort(_('uncommitted changes'))
938 938
939 939 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
940 940 def overridefetch(orig, ui, repo, *pats, **opts):
941 941 repo.lfstatus = True
942 942 s = repo.status()
943 943 repo.lfstatus = False
944 944 if s.modified or s.added or s.removed or s.deleted:
945 945 raise util.Abort(_('uncommitted changes'))
946 946 return orig(ui, repo, *pats, **opts)
947 947
948 948 def overrideforget(orig, ui, repo, *pats, **opts):
949 949 installnormalfilesmatchfn(repo[None].manifest())
950 950 result = orig(ui, repo, *pats, **opts)
951 951 restorematchfn()
952 952 m = scmutil.match(repo[None], pats, opts)
953 953
954 954 try:
955 955 repo.lfstatus = True
956 956 s = repo.status(match=m, clean=True)
957 957 finally:
958 958 repo.lfstatus = False
959 959 forget = sorted(s.modified + s.added + s.deleted + s.clean)
960 960 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
961 961
962 962 for f in forget:
963 963 if lfutil.standin(f) not in repo.dirstate and not \
964 964 os.path.isdir(m.rel(lfutil.standin(f))):
965 965 ui.warn(_('not removing %s: file is already untracked\n')
966 966 % m.rel(f))
967 967 result = 1
968 968
969 969 for f in forget:
970 970 if ui.verbose or not m.exact(f):
971 971 ui.status(_('removing %s\n') % m.rel(f))
972 972
973 973 # Need to lock because standin files are deleted then removed from the
974 974 # repository and we could race in-between.
975 975 wlock = repo.wlock()
976 976 try:
977 977 lfdirstate = lfutil.openlfdirstate(ui, repo)
978 978 for f in forget:
979 979 if lfdirstate[f] == 'a':
980 980 lfdirstate.drop(f)
981 981 else:
982 982 lfdirstate.remove(f)
983 983 lfdirstate.write()
984 984 standins = [lfutil.standin(f) for f in forget]
985 985 for f in standins:
986 986 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
987 987 repo[None].forget(standins)
988 988 finally:
989 989 wlock.release()
990 990
991 991 return result
992 992
993 993 def _getoutgoings(repo, other, missing, addfunc):
994 994 """get pairs of filename and largefile hash in outgoing revisions
995 995 in 'missing'.
996 996
997 997 largefiles already existing on 'other' repository are ignored.
998 998
999 999 'addfunc' is invoked with each unique pair of filename and
1000 1000 largefile hash value.
1001 1001 """
1002 1002 knowns = set()
1003 1003 lfhashes = set()
1004 1004 def dedup(fn, lfhash):
1005 1005 k = (fn, lfhash)
1006 1006 if k not in knowns:
1007 1007 knowns.add(k)
1008 1008 lfhashes.add(lfhash)
1009 1009 lfutil.getlfilestoupload(repo, missing, dedup)
1010 1010 if lfhashes:
1011 1011 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1012 1012 for fn, lfhash in knowns:
1013 1013 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1014 1014 addfunc(fn, lfhash)
1015 1015
1016 1016 def outgoinghook(ui, repo, other, opts, missing):
1017 1017 if opts.pop('large', None):
1018 1018 lfhashes = set()
1019 1019 if ui.debugflag:
1020 1020 toupload = {}
1021 1021 def addfunc(fn, lfhash):
1022 1022 if fn not in toupload:
1023 1023 toupload[fn] = []
1024 1024 toupload[fn].append(lfhash)
1025 1025 lfhashes.add(lfhash)
1026 1026 def showhashes(fn):
1027 1027 for lfhash in sorted(toupload[fn]):
1028 1028 ui.debug(' %s\n' % (lfhash))
1029 1029 else:
1030 1030 toupload = set()
1031 1031 def addfunc(fn, lfhash):
1032 1032 toupload.add(fn)
1033 1033 lfhashes.add(lfhash)
1034 1034 def showhashes(fn):
1035 1035 pass
1036 1036 _getoutgoings(repo, other, missing, addfunc)
1037 1037
1038 1038 if not toupload:
1039 1039 ui.status(_('largefiles: no files to upload\n'))
1040 1040 else:
1041 1041 ui.status(_('largefiles to upload (%d entities):\n')
1042 1042 % (len(lfhashes)))
1043 1043 for file in sorted(toupload):
1044 1044 ui.status(lfutil.splitstandin(file) + '\n')
1045 1045 showhashes(file)
1046 1046 ui.status('\n')
1047 1047
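# A stand-alone sketch of the two addfunc shapes used above (hypothetical
# data): with --debug the hashes are grouped per file, otherwise only the
# file names are collected.
_sketch_byfile = {}
def _sketch_addfunc_debug(fn, lfhash):
    _sketch_byfile.setdefault(fn, []).append(lfhash)

_sketch_names = set()
def _sketch_addfunc_plain(fn, lfhash):
    _sketch_names.add(fn)

for _fn, _hash in [('big.bin', 'h1'), ('big.bin', 'h2')]:
    _sketch_addfunc_debug(_fn, _hash)
    _sketch_addfunc_plain(_fn, _hash)
assert _sketch_byfile == {'big.bin': ['h1', 'h2']}
assert _sketch_names == set(['big.bin'])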
1048 1048 def summaryremotehook(ui, repo, opts, changes):
1049 1049 largeopt = opts.get('large', False)
1050 1050 if changes is None:
1051 1051 if largeopt:
1052 1052 return (False, True) # only outgoing check is needed
1053 1053 else:
1054 1054 return (False, False)
1055 1055 elif largeopt:
1056 1056 url, branch, peer, outgoing = changes[1]
1057 1057 if peer is None:
1058 1058 # i18n: column positioning for "hg summary"
1059 1059 ui.status(_('largefiles: (no remote repo)\n'))
1060 1060 return
1061 1061
1062 1062 toupload = set()
1063 1063 lfhashes = set()
1064 1064 def addfunc(fn, lfhash):
1065 1065 toupload.add(fn)
1066 1066 lfhashes.add(lfhash)
1067 1067 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1068 1068
1069 1069 if not toupload:
1070 1070 # i18n: column positioning for "hg summary"
1071 1071 ui.status(_('largefiles: (no files to upload)\n'))
1072 1072 else:
1073 1073 # i18n: column positioning for "hg summary"
1074 1074 ui.status(_('largefiles: %d entities for %d files to upload\n')
1075 1075 % (len(lfhashes), len(toupload)))
1076 1076
1077 1077 def overridesummary(orig, ui, repo, *pats, **opts):
1078 1078 try:
1079 1079 repo.lfstatus = True
1080 1080 orig(ui, repo, *pats, **opts)
1081 1081 finally:
1082 1082 repo.lfstatus = False
1083 1083
1084 1084 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1085 1085 similarity=None):
1086 1086 if not lfutil.islfilesrepo(repo):
1087 1087 return orig(repo, pats, opts, dry_run, similarity)
1088 1088 # Get the list of missing largefiles so we can remove them
1089 1089 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1090 1090 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1091 1091 False, False, False)
1092 1092
1093 1093 # Call into the normal remove code, but let the original addremove handle
1094 1094 # the removal of the standins. Monkey patching here makes sure
1095 1095 # we don't remove the standin in the largefiles code, preventing a very
1096 1096 # confused state later.
1097 1097 if s.deleted:
1098 1098 m = [repo.wjoin(f) for f in s.deleted]
1099 1099 removelargefiles(repo.ui, repo, True, *m, **opts)
1100 1100 # Call into the normal add code, and any files that *should* be added as
1101 1101 # largefiles will be
1102 1102 addlargefiles(repo.ui, repo, *pats, **opts)
1103 1103 # Now that we've handled largefiles, hand off to the original addremove
1104 1104 # function to take care of the rest. Make sure it doesn't do anything with
1105 1105 # largefiles by installing a matcher that will ignore them.
1106 1106 installnormalfilesmatchfn(repo[None].manifest())
1107 1107 result = orig(repo, pats, opts, dry_run, similarity)
1108 1108 restorematchfn()
1109 1109 return result
1110 1110
1111 1111 # Calling purge with --all will cause the largefiles to be deleted.
1112 1112 # Override repo.status to prevent this from happening.
1113 1113 def overridepurge(orig, ui, repo, *dirs, **opts):
1114 1114 # XXX large file status is buggy when used on repo proxy.
1115 1115 # XXX this needs to be investigated.
1116 1116 repo = repo.unfiltered()
1117 1117 oldstatus = repo.status
1118 1118 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1119 1119 clean=False, unknown=False, listsubrepos=False):
1120 1120 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1121 1121 listsubrepos)
1122 1122 lfdirstate = lfutil.openlfdirstate(ui, repo)
1123 1123 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1124 1124 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1125 1125 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1126 1126 unknown, ignored, r.clean)
1127 1127 repo.status = overridestatus
1128 1128 orig(ui, repo, *dirs, **opts)
1129 1129 repo.status = oldstatus
1130 1130 def overriderollback(orig, ui, repo, **opts):
1131 1131 wlock = repo.wlock()
1132 1132 try:
1133 1133 before = repo.dirstate.parents()
1134 1134 orphans = set(f for f in repo.dirstate
1135 1135 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1136 1136 result = orig(ui, repo, **opts)
1137 1137 after = repo.dirstate.parents()
1138 1138 if before == after:
1139 1139 return result # no need to restore standins
1140 1140
1141 1141 pctx = repo['.']
1142 1142 for f in repo.dirstate:
1143 1143 if lfutil.isstandin(f):
1144 1144 orphans.discard(f)
1145 1145 if repo.dirstate[f] == 'r':
1146 1146 repo.wvfs.unlinkpath(f, ignoremissing=True)
1147 1147 elif f in pctx:
1148 1148 fctx = pctx[f]
1149 1149 repo.wwrite(f, fctx.data(), fctx.flags())
1150 1150 else:
1151 1151 # content of standin is not so important in 'a',
1152 1152 # 'm' or 'n' (coming from the 2nd parent) cases
1153 1153 lfutil.writestandin(repo, f, '', False)
1154 1154 for standin in orphans:
1155 1155 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1156 1156
1157 1157 lfdirstate = lfutil.openlfdirstate(ui, repo)
1158 1158 orphans = set(lfdirstate)
1159 1159 lfiles = lfutil.listlfiles(repo)
1160 1160 for file in lfiles:
1161 1161 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1162 1162 orphans.discard(file)
1163 1163 for lfile in orphans:
1164 1164 lfdirstate.drop(lfile)
1165 1165 lfdirstate.write()
1166 1166 finally:
1167 1167 wlock.release()
1168 1168 return result
1169 1169
1170 1170 def overridetransplant(orig, ui, repo, *revs, **opts):
1171 1171 resuming = opts.get('continue')
1172 1172 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1173 1173 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1174 1174 try:
1175 1175 result = orig(ui, repo, *revs, **opts)
1176 1176 finally:
1177 1177 repo._lfstatuswriters.pop()
1178 1178 repo._lfcommithooks.pop()
1179 1179 return result
1180 1180
1181 1181 def overridecat(orig, ui, repo, file1, *pats, **opts):
1182 1182 ctx = scmutil.revsingle(repo, opts.get('rev'))
1183 1183 err = 1
1184 1184 notbad = set()
1185 1185 m = scmutil.match(ctx, (file1,) + pats, opts)
1186 1186 origmatchfn = m.matchfn
1187 1187 def lfmatchfn(f):
1188 1188 if origmatchfn(f):
1189 1189 return True
1190 1190 lf = lfutil.splitstandin(f)
1191 1191 if lf is None:
1192 1192 return False
1193 1193 notbad.add(lf)
1194 1194 return origmatchfn(lf)
1195 1195 m.matchfn = lfmatchfn
1196 1196 origbadfn = m.bad
1197 1197 def lfbadfn(f, msg):
1198 1198 if not f in notbad:
1199 1199 origbadfn(f, msg)
1200 1200 m.bad = lfbadfn
1201 1201 for f in ctx.walk(m):
1202 1202 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1203 1203 pathname=f)
1204 1204 lf = lfutil.splitstandin(f)
1205 1205 if lf is None or origmatchfn(f):
1206 1206 # duplicating unreachable code from commands.cat
1207 1207 data = ctx[f].data()
1208 1208 if opts.get('decode'):
1209 1209 data = repo.wwritedata(f, data)
1210 1210 fp.write(data)
1211 1211 else:
1212 1212 hash = lfutil.readstandin(repo, lf, ctx.rev())
1213 1213 if not lfutil.inusercache(repo.ui, hash):
1214 1214 store = basestore._openstore(repo)
1215 1215 success, missing = store.get([(lf, hash)])
1216 1216 if len(success) != 1:
1217 1217 raise util.Abort(
1218 1218 _('largefile %s is not in cache and could not be '
1219 1219 'downloaded') % lf)
1220 1220 path = lfutil.usercachepath(repo.ui, hash)
1221 1221 fpin = open(path, "rb")
1222 1222 for chunk in util.filechunkiter(fpin, 128 * 1024):
1223 1223 fp.write(chunk)
1224 1224 fpin.close()
1225 1225 fp.close()
1226 1226 err = 0
1227 1227 return err
1228 1228
1229 def mercurialsinkbefore(orig, sink):
1230 sink.repo._isconverting = True
1231 orig(sink)
1232
1233 def mercurialsinkafter(orig, sink):
1234 sink.repo._isconverting = False
1235 orig(sink)
1236
1237 1229 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1238 1230 *args, **kwargs):
1239 1231 wlock = repo.wlock()
1240 1232 try:
1241 1233 # branch | | |
1242 1234 # merge | force | partial | action
1243 1235 # -------+-------+---------+--------------
1244 1236 # x | x | x | linear-merge
1245 1237 # o | x | x | branch-merge
1246 1238 # x | o | x | overwrite (as clean update)
1247 1239 # o | o | x | force-branch-merge (*1)
1248 1240 # x | x | o | (*)
1249 1241 # o | x | o | (*)
1250 1242 # x | o | o | overwrite (as revert)
1251 1243 # o | o | o | (*)
1252 1244 #
1253 1245 # (*) don't care
1254 1246 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1255 1247
1256 1248 linearmerge = not branchmerge and not force and not partial
1257 1249
1258 1250 if linearmerge or (branchmerge and force and not partial):
1259 1251 # update standins for linear-merge or force-branch-merge,
1260 1252 # because largefiles in the working directory may be modified
1261 1253 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1262 1254 unsure, s = lfdirstate.status(match_.always(repo.root,
1263 1255 repo.getcwd()),
1264 1256 [], False, False, False)
1265 1257 for lfile in unsure + s.modified + s.added:
1266 1258 lfutil.updatestandin(repo, lfutil.standin(lfile))
1267 1259
1268 1260 if linearmerge:
1269 1261 # Only call updatelfiles on the standins that have changed
1270 1262 # to save time
1271 1263 oldstandins = lfutil.getstandinsstate(repo)
1272 1264
1273 1265 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1274 1266
1275 1267 filelist = None
1276 1268 if linearmerge:
1277 1269 newstandins = lfutil.getstandinsstate(repo)
1278 1270 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1279 1271
1280 1272 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1281 1273 normallookup=partial)
1282 1274
1283 1275 return result
1284 1276 finally:
1285 1277 wlock.release()
1286 1278
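# A stand-alone transcription of the decision table at the top of mergeupdate
# (hypothetical helper; it only classifies the combinations, while the code
# above also refreshes standins for 'linear-merge' and 'force-branch-merge'):
def _sketch_updatemode(branchmerge, force, partial):
    if not branchmerge and not force and not partial:
        return 'linear-merge'
    if branchmerge and not force and not partial:
        return 'branch-merge'
    if not branchmerge and force and not partial:
        return 'overwrite (as clean update)'
    if branchmerge and force and not partial:
        return 'force-branch-merge'
    if not branchmerge and force and partial:
        return 'overwrite (as revert)'
    return "don't care"

assert _sketch_updatemode(False, False, False) == 'linear-merge'
assert _sketch_updatemode(True, True, False) == 'force-branch-merge'
assert _sketch_updatemode(False, True, True) == 'overwrite (as revert)'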
1287 1279 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1288 1280 result = orig(repo, files, *args, **kwargs)
1289 1281
1290 1282 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1291 1283 if filelist:
1292 1284 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1293 1285 printmessage=False, normallookup=True)
1294 1286
1295 1287 return result
@@ -1,367 +1,364 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles repositories: reposetup'''
10 10 import copy
11 11 import os
12 12
13 13 from mercurial import error, manifest, match as match_, util
14 14 from mercurial.i18n import _
15 15 from mercurial import localrepo, scmutil
16 16
17 17 import lfcommands
18 18 import lfutil
19 19
20 20 def reposetup(ui, repo):
21 21 # wire repositories should be given new wireproto functions
22 22 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
23 23 if not repo.local():
24 24 return
25 25
26 26 class lfilesrepo(repo.__class__):
27 27 lfstatus = False
28 28 def status_nolfiles(self, *args, **kwargs):
29 29 return super(lfilesrepo, self).status(*args, **kwargs)
30 30
31 31 # When lfstatus is set, return a context that gives the names
32 32 # of largefiles instead of their corresponding standins and
33 33 # identifies the largefiles as always binary, regardless of
34 34 # their actual contents.
35 35 def __getitem__(self, changeid):
36 36 ctx = super(lfilesrepo, self).__getitem__(changeid)
37 37 if self.lfstatus:
38 38 class lfilesmanifestdict(manifest.manifestdict):
39 39 def __contains__(self, filename):
40 40 orig = super(lfilesmanifestdict, self).__contains__
41 41 return orig(filename) or orig(lfutil.standin(filename))
42 42 class lfilesctx(ctx.__class__):
43 43 def files(self):
44 44 filenames = super(lfilesctx, self).files()
45 45 return [lfutil.splitstandin(f) or f for f in filenames]
46 46 def manifest(self):
47 47 man1 = super(lfilesctx, self).manifest()
48 48 man1.__class__ = lfilesmanifestdict
49 49 return man1
50 50 def filectx(self, path, fileid=None, filelog=None):
51 51 orig = super(lfilesctx, self).filectx
52 52 try:
53 53 if filelog is not None:
54 54 result = orig(path, fileid, filelog)
55 55 else:
56 56 result = orig(path, fileid)
57 57 except error.LookupError:
58 58 # Adding a null character will cause Mercurial to
59 59 # identify this as a binary file.
60 60 if filelog is not None:
61 61 result = orig(lfutil.standin(path), fileid,
62 62 filelog)
63 63 else:
64 64 result = orig(lfutil.standin(path), fileid)
65 65 olddata = result.data
66 66 result.data = lambda: olddata() + '\0'
67 67 return result
68 68 ctx.__class__ = lfilesctx
69 69 return ctx
70 70
71 71 # Figure out the status of big files and insert them into the
72 72 # appropriate list in the result. Also removes standin files
73 73 # from the listing. Revert to the original status if
74 74 # self.lfstatus is False.
75 75 # XXX large file status is buggy when used on repo proxy.
76 76 # XXX this needs to be investigated.
77 77 @localrepo.unfilteredmethod
78 78 def status(self, node1='.', node2=None, match=None, ignored=False,
79 79 clean=False, unknown=False, listsubrepos=False):
80 80 listignored, listclean, listunknown = ignored, clean, unknown
81 81 orig = super(lfilesrepo, self).status
82 82 if not self.lfstatus:
83 83 return orig(node1, node2, match, listignored, listclean,
84 84 listunknown, listsubrepos)
85 85
86 86 # some calls in this function rely on the old version of status
87 87 self.lfstatus = False
88 88 ctx1 = self[node1]
89 89 ctx2 = self[node2]
90 90 working = ctx2.rev() is None
91 91 parentworking = working and ctx1 == self['.']
92 92
93 93 if match is None:
94 94 match = match_.always(self.root, self.getcwd())
95 95
96 96 wlock = None
97 97 try:
98 98 try:
99 99 # updating the dirstate is optional
100 100 # so we don't wait on the lock
101 101 wlock = self.wlock(False)
102 102 except error.LockError:
103 103 pass
104 104
105 105 # First check if paths or patterns were specified on the
106 106 # command line. If there were, and they don't match any
107 107 # largefiles, we should just bail here and let super
108 108 # handle it -- thus gaining a big performance boost.
109 109 lfdirstate = lfutil.openlfdirstate(ui, self)
110 110 if not match.always():
111 111 for f in lfdirstate:
112 112 if match(f):
113 113 break
114 114 else:
115 115 return orig(node1, node2, match, listignored, listclean,
116 116 listunknown, listsubrepos)
117 117
118 118 # Create a copy of match that matches standins instead
119 119 # of largefiles.
120 120 def tostandins(files):
121 121 if not working:
122 122 return files
123 123 newfiles = []
124 124 dirstate = self.dirstate
125 125 for f in files:
126 126 sf = lfutil.standin(f)
127 127 if sf in dirstate:
128 128 newfiles.append(sf)
129 129 elif sf in dirstate.dirs():
130 130 # Directory entries could be regular or
131 131 # standin, check both
132 132 newfiles.extend((f, sf))
133 133 else:
134 134 newfiles.append(f)
135 135 return newfiles
136 136
137 137 m = copy.copy(match)
138 138 m._files = tostandins(m._files)
139 139
140 140 result = orig(node1, node2, m, ignored, clean, unknown,
141 141 listsubrepos)
142 142 if working:
143 143
144 144 def sfindirstate(f):
145 145 sf = lfutil.standin(f)
146 146 dirstate = self.dirstate
147 147 return sf in dirstate or sf in dirstate.dirs()
148 148
149 149 match._files = [f for f in match._files
150 150 if sfindirstate(f)]
151 151 # Don't waste time getting the ignored and unknown
152 152 # files from lfdirstate
153 153 unsure, s = lfdirstate.status(match, [], False, listclean,
154 154 False)
155 155 (modified, added, removed, clean) = (s.modified, s.added,
156 156 s.removed, s.clean)
157 157 if parentworking:
158 158 for lfile in unsure:
159 159 standin = lfutil.standin(lfile)
160 160 if standin not in ctx1:
161 161 # from second parent
162 162 modified.append(lfile)
163 163 elif ctx1[standin].data().strip() \
164 164 != lfutil.hashfile(self.wjoin(lfile)):
165 165 modified.append(lfile)
166 166 else:
167 167 if listclean:
168 168 clean.append(lfile)
169 169 lfdirstate.normal(lfile)
170 170 else:
171 171 tocheck = unsure + modified + added + clean
172 172 modified, added, clean = [], [], []
173 173
174 174 for lfile in tocheck:
175 175 standin = lfutil.standin(lfile)
176 176 if standin in ctx1:
177 177 abslfile = self.wjoin(lfile)
178 178 if ((ctx1[standin].data().strip() !=
179 179 lfutil.hashfile(abslfile)) or
180 180 (('x' in ctx1.flags(standin)) !=
181 181 bool(lfutil.getexecutable(abslfile)))):
182 182 modified.append(lfile)
183 183 elif listclean:
184 184 clean.append(lfile)
185 185 else:
186 186 added.append(lfile)
187 187
188 188 # at this point, 'removed' contains largefiles
189 189 # marked as 'R' in the working context.
190 190 # largefiles that are not managed in the target
191 191 # context either should be excluded from 'removed'.
192 192 removed = [lfile for lfile in removed
193 193 if lfutil.standin(lfile) in ctx1]
194 194
195 195 # Standins no longer found in lfdirstate have been
196 196 # removed
197 197 for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
198 198 lfile = lfutil.splitstandin(standin)
199 199 if not match(lfile):
200 200 continue
201 201 if lfile not in lfdirstate:
202 202 removed.append(lfile)
203 203
204 204 # Filter result lists
205 205 result = list(result)
206 206
207 207 # Largefiles are not really removed when they're
208 208 # still in the normal dirstate. Likewise, normal
209 209 # files are not really removed if they are still in
210 210 # lfdirstate. This happens in merges where files
211 211 # change type.
212 212 removed = [f for f in removed
213 213 if f not in self.dirstate]
214 214 result[2] = [f for f in result[2]
215 215 if f not in lfdirstate]
216 216
217 217 lfiles = set(lfdirstate._map)
218 218 # Unknown files
219 219 result[4] = set(result[4]).difference(lfiles)
220 220 # Ignored files
221 221 result[5] = set(result[5]).difference(lfiles)
222 222 # combine normal files and largefiles
223 223 normals = [[fn for fn in filelist
224 224 if not lfutil.isstandin(fn)]
225 225 for filelist in result]
226 226 lfstatus = (modified, added, removed, s.deleted, [], [],
227 227 clean)
228 228 result = [sorted(list1 + list2)
229 229 for (list1, list2) in zip(normals, lfstatus)]
230 230 else: # not against working directory
231 231 result = [[lfutil.splitstandin(f) or f for f in items]
232 232 for items in result]
233 233
234 234 if wlock:
235 235 lfdirstate.write()
236 236
237 237 finally:
238 238 if wlock:
239 239 wlock.release()
240 240
241 241 self.lfstatus = True
242 242 return scmutil.status(*result)
243 243
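# A stand-alone sketch of the final combination step in the status override
# above (hypothetical lists): standins are dropped from the plain status and
# the translated largefile names are merged back in, keeping each list sorted.
_sketch_normals = [['normal1'], [], []]   # e.g. modified, added, removed
_sketch_lfstatus = [['big.bin'], [], []]
_sketch_combined = [sorted(a + b)
                    for a, b in zip(_sketch_normals, _sketch_lfstatus)]
assert _sketch_combined == [['big.bin', 'normal1'], [], []]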
244 # As part of committing, copy all of the largefiles into the
245 # cache.
246 244 def commitctx(self, ctx, *args, **kwargs):
247 245 node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs)
248 lfutil.copyalltostore(self, node)
249 246 class lfilesctx(ctx.__class__):
250 247 def markcommitted(self, node):
251 248 orig = super(lfilesctx, self).markcommitted
252 249 return lfutil.markcommitted(orig, self, node)
253 250 ctx.__class__ = lfilesctx
254 251 return node
255 252
256 253 # Before commit, largefile standins have not had their
257 254 # contents updated to reflect the hash of their largefile.
258 255 # Do that here.
259 256 def commit(self, text="", user=None, date=None, match=None,
260 257 force=False, editor=False, extra={}):
261 258 orig = super(lfilesrepo, self).commit
262 259
263 260 wlock = self.wlock()
264 261 try:
265 262 lfcommithook = self._lfcommithooks[-1]
266 263 match = lfcommithook(self, match)
267 264 result = orig(text=text, user=user, date=date, match=match,
268 265 force=force, editor=editor, extra=extra)
269 266 return result
270 267 finally:
271 268 wlock.release()
272 269
273 270 def push(self, remote, force=False, revs=None, newbranch=False):
274 271 if remote.local():
275 272 missing = set(self.requirements) - remote.local().supported
276 273 if missing:
277 274 msg = _("required features are not"
278 275 " supported in the destination:"
279 276 " %s") % (', '.join(sorted(missing)))
280 277 raise util.Abort(msg)
281 278 return super(lfilesrepo, self).push(remote, force=force, revs=revs,
282 279 newbranch=newbranch)
283 280
284 281 # TODO: _subdirlfs should be moved into "lfutil.py", because
285 282 # it is referred to only from "lfutil.updatestandinsbymatch"
286 283 def _subdirlfs(self, files, lfiles):
287 284 '''
288 285 Adjust matched file list
289 286 If we pass a directory to commit whose only committable files
290 287 are largefiles, the core commit code aborts before finding
291 288 the largefiles.
292 289 So we do the following:
293 290 For directories that only have largefiles as matches,
294 291 we explicitly add the largefiles to the match list and remove
295 292 the directory.
296 293 In other cases, we leave the match list unmodified.
297 294 '''
298 295 actualfiles = []
299 296 dirs = []
300 297 regulars = []
301 298
302 299 for f in files:
303 300 if lfutil.isstandin(f + '/'):
304 301 raise util.Abort(
305 302 _('file "%s" is a largefile standin') % f,
306 303 hint=('commit the largefile itself instead'))
307 304 # Scan directories
308 305 if os.path.isdir(self.wjoin(f)):
309 306 dirs.append(f)
310 307 else:
311 308 regulars.append(f)
312 309
313 310 for f in dirs:
314 311 matcheddir = False
315 312 d = self.dirstate.normalize(f) + '/'
316 313 # Check for matched normal files
317 314 for mf in regulars:
318 315 if self.dirstate.normalize(mf).startswith(d):
319 316 actualfiles.append(f)
320 317 matcheddir = True
321 318 break
322 319 if not matcheddir:
323 320 # If no normal match, manually append
324 321 # any matching largefiles
325 322 for lf in lfiles:
326 323 if self.dirstate.normalize(lf).startswith(d):
327 324 actualfiles.append(lf)
328 325 if not matcheddir:
329 326 actualfiles.append(lfutil.standin(f))
330 327 matcheddir = True
331 328 # Nothing in dir, so readd it
332 329 # and let commit reject it
333 330 if not matcheddir:
334 331 actualfiles.append(f)
335 332
336 333 # Always add normal files
337 334 actualfiles += regulars
338 335 return actualfiles
339 336
340 337 repo.__class__ = lfilesrepo
341 338
342 339 # stack of hooks being executed before committing.
343 340 # only last element ("_lfcommithooks[-1]") is used for each committing.
344 341 repo._lfcommithooks = [lfutil.updatestandinsbymatch]
345 342
346 343 # Stack of status writer functions taking "*msg, **opts" arguments
347 344 # like "ui.status()". Only last element ("_lfupdatereporters[-1]")
348 345 # is used to write status out.
349 346 repo._lfstatuswriters = [ui.status]
350 347
351 348 def prepushoutgoinghook(local, remote, outgoing):
352 349 if outgoing.missing:
353 350 toupload = set()
354 351 addfunc = lambda fn, lfhash: toupload.add(lfhash)
355 352 lfutil.getlfilestoupload(local, outgoing.missing, addfunc)
356 353 lfcommands.uploadlfiles(ui, local, remote, toupload)
357 354 repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
358 355
359 356 def checkrequireslfiles(ui, repo, **kwargs):
360 357 if 'largefiles' not in repo.requirements and util.any(
361 358 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
362 359 repo.requirements.add('largefiles')
363 360 repo._writerequirements()
364 361
365 362 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
366 363 'largefiles')
367 364 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
@@ -1,183 +1,176 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles extension: uisetup'''
10 10
11 11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
12 12 httppeer, merge, scmutil, sshpeer, wireproto, revset, subrepo
13 13 from mercurial.i18n import _
14 14 from mercurial.hgweb import hgweb_mod, webcommands
15 15
16 16 import overrides
17 17 import proto
18 18
19 19 def uisetup(ui):
20 20 # Disable auto-status for some commands which assume that all
21 21 # files in the result are under Mercurial's control
22 22
23 23 entry = extensions.wrapcommand(commands.table, 'add',
24 24 overrides.overrideadd)
25 25 addopt = [('', 'large', None, _('add as largefile')),
26 26 ('', 'normal', None, _('add as normal file')),
27 27 ('', 'lfsize', '', _('add all files above this size '
28 28 '(in megabytes) as largefiles '
29 29 '(default: 10)'))]
30 30 entry[1].extend(addopt)
31 31
32 32 # The scmutil function is called both by the (trivial) addremove command,
33 33 # and in the process of handling commit -A (issue3542)
34 34 entry = extensions.wrapfunction(scmutil, 'addremove',
35 35 overrides.scmutiladdremove)
36 36 entry = extensions.wrapcommand(commands.table, 'remove',
37 37 overrides.overrideremove)
38 38 entry = extensions.wrapcommand(commands.table, 'forget',
39 39 overrides.overrideforget)
40 40
41 41 # Subrepos call status function
42 42 entry = extensions.wrapcommand(commands.table, 'status',
43 43 overrides.overridestatus)
44 44 entry = extensions.wrapfunction(subrepo.hgsubrepo, 'status',
45 45 overrides.overridestatusfn)
46 46
47 47 entry = extensions.wrapcommand(commands.table, 'log',
48 48 overrides.overridelog)
49 49 entry = extensions.wrapcommand(commands.table, 'rollback',
50 50 overrides.overriderollback)
51 51 entry = extensions.wrapcommand(commands.table, 'verify',
52 52 overrides.overrideverify)
53 53
54 54 verifyopt = [('', 'large', None,
55 55 _('verify that all largefiles in the current revision exist')),
56 56 ('', 'lfa', None,
57 57 _('verify largefiles in all revisions, not just current')),
58 58 ('', 'lfc', None,
59 59 _('verify local largefile contents, not just existence'))]
60 60 entry[1].extend(verifyopt)
61 61
62 62 entry = extensions.wrapcommand(commands.table, 'debugstate',
63 63 overrides.overridedebugstate)
64 64 debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
65 65 entry[1].extend(debugstateopt)
66 66
67 67 outgoing = lambda orgfunc, *arg, **kwargs: orgfunc(*arg, **kwargs)
68 68 entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
69 69 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
70 70 entry[1].extend(outgoingopt)
71 71 cmdutil.outgoinghooks.add('largefiles', overrides.outgoinghook)
72 72 entry = extensions.wrapcommand(commands.table, 'summary',
73 73 overrides.overridesummary)
74 74 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
75 75 entry[1].extend(summaryopt)
76 76 cmdutil.summaryremotehooks.add('largefiles', overrides.summaryremotehook)
77 77
78 78 entry = extensions.wrapcommand(commands.table, 'update',
79 79 overrides.overrideupdate)
80 80 entry = extensions.wrapcommand(commands.table, 'pull',
81 81 overrides.overridepull)
82 82 pullopt = [('', 'all-largefiles', None,
83 83 _('download all pulled versions of largefiles (DEPRECATED)')),
84 84 ('', 'lfrev', [],
85 85 _('download largefiles for these revisions'), _('REV'))]
86 86 entry[1].extend(pullopt)
87 87 revset.symbols['pulled'] = overrides.pulledrevsetsymbol
88 88
89 89 entry = extensions.wrapcommand(commands.table, 'clone',
90 90 overrides.overrideclone)
91 91 cloneopt = [('', 'all-largefiles', None,
92 92 _('download all versions of all largefiles'))]
93 93 entry[1].extend(cloneopt)
94 94 entry = extensions.wrapfunction(hg, 'clone', overrides.hgclone)
95 95
96 96 entry = extensions.wrapcommand(commands.table, 'cat',
97 97 overrides.overridecat)
98 98 entry = extensions.wrapfunction(merge, '_checkunknownfile',
99 99 overrides.overridecheckunknownfile)
100 100 entry = extensions.wrapfunction(merge, 'calculateupdates',
101 101 overrides.overridecalculateupdates)
102 102 entry = extensions.wrapfunction(merge, 'recordupdates',
103 103 overrides.mergerecordupdates)
104 104 entry = extensions.wrapfunction(merge, 'update',
105 105 overrides.mergeupdate)
106 106 entry = extensions.wrapfunction(filemerge, 'filemerge',
107 107 overrides.overridefilemerge)
108 108 entry = extensions.wrapfunction(cmdutil, 'copy',
109 109 overrides.overridecopy)
110 110
111 111 # Summary calls dirty on the subrepos
112 112 entry = extensions.wrapfunction(subrepo.hgsubrepo, 'dirty',
113 113 overrides.overridedirty)
114 114
115 115 # Backout calls revert so we need to override both the command and the
116 116 # function
117 117 entry = extensions.wrapcommand(commands.table, 'revert',
118 118 overrides.overriderevert)
119 119 entry = extensions.wrapfunction(commands, 'revert',
120 120 overrides.overriderevert)
121 121
122 122 extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
123 123 extensions.wrapfunction(subrepo.hgsubrepo, 'archive',
124 124 overrides.hgsubrepoarchive)
125 125 extensions.wrapfunction(cmdutil, 'bailifchanged',
126 126 overrides.overridebailifchanged)
127 127
128 128 extensions.wrapfunction(scmutil, 'marktouched',
129 129 overrides.scmutilmarktouched)
130 130
131 131 # create the new wireproto commands ...
132 132 wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
133 133 wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
134 134 wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
135 135
136 136 # ... and wrap some existing ones
137 137 wireproto.commands['capabilities'] = (proto.capabilities, '')
138 138 wireproto.commands['heads'] = (proto.heads, '')
139 139 wireproto.commands['lheads'] = (wireproto.heads, '')
140 140
141 141 # make putlfile behave the same as push and {get,stat}lfile behave
142 142 # the same as pull w.r.t. permissions checks
143 143 hgweb_mod.perms['putlfile'] = 'push'
144 144 hgweb_mod.perms['getlfile'] = 'pull'
145 145 hgweb_mod.perms['statlfile'] = 'pull'
146 146
147 147 extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
148 148
149 149 # the hello wireproto command uses wireproto.capabilities, so it won't see
150 150 # our largefiles capability unless we replace the actual function as well.
151 151 proto.capabilitiesorig = wireproto.capabilities
152 152 wireproto.capabilities = proto.capabilities
153 153
154 154 # can't do this in reposetup because it needs to have happened before
155 155 # wirerepo.__init__ is called
156 156 proto.ssholdcallstream = sshpeer.sshpeer._callstream
157 157 proto.httpoldcallstream = httppeer.httppeer._callstream
158 158 sshpeer.sshpeer._callstream = proto.sshrepocallstream
159 159 httppeer.httppeer._callstream = proto.httprepocallstream
160 160
161 161 # override some extensions' stuff as well
162 162 for name, module in extensions.extensions():
163 163 if name == 'fetch':
164 164 extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
165 165 overrides.overridefetch)
166 166 if name == 'purge':
167 167 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
168 168 overrides.overridepurge)
169 169 if name == 'rebase':
170 170 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
171 171 overrides.overriderebase)
172 172 extensions.wrapfunction(module, 'rebase',
173 173 overrides.overriderebase)
174 174 if name == 'transplant':
175 175 extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
176 176 overrides.overridetransplant)
177 if name == 'convert':
178 convcmd = getattr(module, 'convcmd')
179 hgsink = getattr(convcmd, 'mercurial_sink')
180 extensions.wrapfunction(hgsink, 'before',
181 overrides.mercurialsinkbefore)
182 extensions.wrapfunction(hgsink, 'after',
183 overrides.mercurialsinkafter)
@@ -1,613 +1,626 b''
1 1 This file focuses mainly on updating largefiles in the working
2 2 directory (and ".hg/largefiles/dirstate")
3 3
4 4 $ cat >> $HGRCPATH <<EOF
5 5 > [ui]
6 6 > merge = internal:fail
7 7 > [extensions]
8 8 > largefiles =
9 9 > EOF
10 10
11 11 $ hg init repo
12 12 $ cd repo
13 13
14 14 $ echo large1 > large1
15 15 $ echo large2 > large2
16 16 $ hg add --large large1 large2
17 17 $ echo normal1 > normal1
18 18 $ hg add normal1
19 19 $ hg commit -m '#0'
20 20 $ echo 'large1 in #1' > large1
21 21 $ echo 'normal1 in #1' > normal1
22 22 $ hg commit -m '#1'
23 23 $ hg update -q -C 0
24 24 $ echo 'large2 in #2' > large2
25 25 $ hg commit -m '#2'
26 26 created new head
27 27
28 28 Test that "hg merge" updates largefiles from "other" correctly
29 29
30 30 (getting largefiles from "other" normally)
31 31
32 32 $ hg status -A large1
33 33 C large1
34 34 $ cat large1
35 35 large1
36 36 $ cat .hglf/large1
37 37 4669e532d5b2c093a78eca010077e708a071bb64
38 38 $ hg merge --config debug.dirstate.delaywrite=2
39 39 getting changed largefiles
40 40 1 largefiles updated, 0 removed
41 41 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 42 (branch merge, don't forget to commit)
43 43 $ hg status -A large1
44 44 M large1
45 45 $ cat large1
46 46 large1 in #1
47 47 $ cat .hglf/large1
48 48 58e24f733a964da346e2407a2bee99d9001184f5
49 49 $ hg diff -c 1 --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
50 50 -4669e532d5b2c093a78eca010077e708a071bb64
51 51 +58e24f733a964da346e2407a2bee99d9001184f5
52 52
53 53 (getting largefiles from "other" via conflict prompt)
54 54
55 55 $ hg update -q -C 2
56 56 $ echo 'large1 in #3' > large1
57 57 $ echo 'normal1 in #3' > normal1
58 58 $ hg commit -m '#3'
59 59 $ cat .hglf/large1
60 60 e5bb990443d6a92aaf7223813720f7566c9dd05b
61 61 $ hg merge --config debug.dirstate.delaywrite=2 --config ui.interactive=True <<EOF
62 62 > o
63 63 > EOF
64 64 largefile large1 has a merge conflict
65 65 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
66 66 keep (l)ocal e5bb990443d6a92aaf7223813720f7566c9dd05b or
67 67 take (o)ther 58e24f733a964da346e2407a2bee99d9001184f5? o
68 68 merging normal1
69 69 warning: conflicts during merge.
70 70 merging normal1 incomplete! (edit conflicts, then use 'hg resolve --mark')
71 71 getting changed largefiles
72 72 1 largefiles updated, 0 removed
73 73 0 files updated, 1 files merged, 0 files removed, 1 files unresolved
74 74 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
75 75 [1]
76 76 $ hg status -A large1
77 77 M large1
78 78 $ cat large1
79 79 large1 in #1
80 80 $ cat .hglf/large1
81 81 58e24f733a964da346e2407a2bee99d9001184f5
82 82
83 83 Test that "hg revert -r REV" updates largefiles from "REV" correctly
84 84
85 85 $ hg update -q -C 3
86 86 $ hg status -A large1
87 87 C large1
88 88 $ cat large1
89 89 large1 in #3
90 90 $ cat .hglf/large1
91 91 e5bb990443d6a92aaf7223813720f7566c9dd05b
92 92 $ hg diff -c 1 --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
93 93 -4669e532d5b2c093a78eca010077e708a071bb64
94 94 +58e24f733a964da346e2407a2bee99d9001184f5
95 95 $ hg revert --no-backup -r 1 --config debug.dirstate.delaywrite=2 large1
96 96 $ hg status -A large1
97 97 M large1
98 98 $ cat large1
99 99 large1 in #1
100 100 $ cat .hglf/large1
101 101 58e24f733a964da346e2407a2bee99d9001184f5
102 102
103 103 Test that "hg rollback" restores status of largefiles correctly
104 104
105 105 $ hg update -C -q
106 106 $ hg remove large1
107 107 $ test -f .hglf/large1
108 108 [1]
109 109 $ hg forget large2
110 110 $ test -f .hglf/large2
111 111 [1]
112 112 $ echo largeX > largeX
113 113 $ hg add --large largeX
114 114 $ cat .hglf/largeX
115 115
116 116 $ hg commit -m 'will be rollback-ed soon'
117 117 $ echo largeY > largeY
118 118 $ hg add --large largeY
119 119 #if windows
120 120 $ hg status -A large1
121 121 large1: * (glob)
122 122 #else
123 123 $ hg status -A large1
124 124 large1: No such file or directory
125 125 #endif
126 126 $ hg status -A large2
127 127 ? large2
128 128 $ hg status -A largeX
129 129 C largeX
130 130 $ hg status -A largeY
131 131 A largeY
132 132 $ hg rollback
133 133 repository tip rolled back to revision 3 (undo commit)
134 134 working directory now based on revision 3
135 135 $ hg status -A large1
136 136 R large1
137 137 $ test -f .hglf/large1
138 138 [1]
139 139 $ hg status -A large2
140 140 R large2
141 141 $ test -f .hglf/large2
142 142 [1]
143 143 $ hg status -A largeX
144 144 A largeX
145 145 $ cat .hglf/largeX
146 146
147 147 $ hg status -A largeY
148 148 ? largeY
149 149 $ test -f .hglf/largeY
150 150 [1]
151 151
152 152 Test that "hg rollback" restores standins correctly
153 153
154 154 $ hg commit -m 'will be rollback-ed soon'
155 155 $ hg update -q -C 2
156 156 $ cat large1
157 157 large1
158 158 $ cat .hglf/large1
159 159 4669e532d5b2c093a78eca010077e708a071bb64
160 160 $ cat large2
161 161 large2 in #2
162 162 $ cat .hglf/large2
163 163 3cfce6277e7668985707b6887ce56f9f62f6ccd9
164 164
165 165 $ hg rollback -q -f
166 166 $ cat large1
167 167 large1
168 168 $ cat .hglf/large1
169 169 4669e532d5b2c093a78eca010077e708a071bb64
170 170 $ cat large2
171 171 large2 in #2
172 172 $ cat .hglf/large2
173 173 3cfce6277e7668985707b6887ce56f9f62f6ccd9
174 174
175 175 (rollback the parent of the working directory, when it is not the
176 176 branch tip)
177 177
178 178 $ hg update -q -C 1
179 179 $ cat .hglf/large1
180 180 58e24f733a964da346e2407a2bee99d9001184f5
181 181 $ cat .hglf/large2
182 182 1deebade43c8c498a3c8daddac0244dc55d1331d
183 183
184 184 $ echo normalX > normalX
185 185 $ hg add normalX
186 186 $ hg commit -m 'will be rollback-ed soon'
187 187 $ hg rollback -q
188 188
189 189 $ cat .hglf/large1
190 190 58e24f733a964da346e2407a2bee99d9001184f5
191 191 $ cat .hglf/large2
192 192 1deebade43c8c498a3c8daddac0244dc55d1331d
193 193
194 194 Test that "hg status" shows the status of largefiles correctly just after
195 195 an automated commit like rebase/transplant
196 196
197 197 $ cat >> .hg/hgrc <<EOF
198 198 > [extensions]
199 199 > rebase =
200 200 > strip =
201 201 > transplant =
202 202 > EOF
203 203 $ hg update -q -C 1
204 204 $ hg remove large1
205 205 $ echo largeX > largeX
206 206 $ hg add --large largeX
207 207 $ hg commit -m '#4'
208 208
209 209 $ hg rebase -s 1 -d 2 --keep
210 210 #if windows
211 211 $ hg status -A large1
212 212 large1: * (glob)
213 213 #else
214 214 $ hg status -A large1
215 215 large1: No such file or directory
216 216 #endif
217 217 $ hg status -A largeX
218 218 C largeX
219 219 $ hg strip -q 5
220 220
221 221 $ hg update -q -C 2
222 222 $ hg transplant -q 1 4
223 223 #if windows
224 224 $ hg status -A large1
225 225 large1: * (glob)
226 226 #else
227 227 $ hg status -A large1
228 228 large1: No such file or directory
229 229 #endif
230 230 $ hg status -A largeX
231 231 C largeX
232 232 $ hg strip -q 5
233 233
234 234 $ hg update -q -C 2
235 235 $ hg transplant -q --merge 1 --merge 4
236 236 #if windows
237 237 $ hg status -A large1
238 238 large1: * (glob)
239 239 #else
240 240 $ hg status -A large1
241 241 large1: No such file or directory
242 242 #endif
243 243 $ hg status -A largeX
244 244 C largeX
245 245 $ hg strip -q 5
246 246
247 247 Test that linear merge can detect modification (and conflict) correctly
248 248
249 249 (linear merge without conflict)
250 250
251 251 $ echo 'large2 for linear merge (no conflict)' > large2
252 252 $ hg update 3 --config debug.dirstate.delaywrite=2
253 253 getting changed largefiles
254 254 1 largefiles updated, 0 removed
255 255 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
256 256 $ hg status -A large2
257 257 M large2
258 258 $ cat large2
259 259 large2 for linear merge (no conflict)
260 260 $ cat .hglf/large2
261 261 9c4bf8f1b33536d6e5f89447e10620cfe52ea710
262 262
263 263 (linear merge with conflict, choosing "other")
264 264
265 265 $ hg update -q -C 2
266 266 $ echo 'large1 for linear merge (conflict)' > large1
267 267 $ hg update 3 --config ui.interactive=True <<EOF
268 268 > o
269 269 > EOF
270 270 largefile large1 has a merge conflict
271 271 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
272 272 keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or
273 273 take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b? o
274 274 getting changed largefiles
275 275 1 largefiles updated, 0 removed
276 276 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
277 277 $ hg status -A large1
278 278 C large1
279 279 $ cat large1
280 280 large1 in #3
281 281 $ cat .hglf/large1
282 282 e5bb990443d6a92aaf7223813720f7566c9dd05b
283 283
284 284 (linear merge with conflict, choosing "local")
285 285
286 286 $ hg update -q -C 2
287 287 $ echo 'large1 for linear merge (conflict)' > large1
288 288 $ hg update 3 --config debug.dirstate.delaywrite=2
289 289 largefile large1 has a merge conflict
290 290 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
291 291 keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or
292 292 take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b? l
293 293 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
294 294 $ hg status -A large1
295 295 M large1
296 296 $ cat large1
297 297 large1 for linear merge (conflict)
298 298 $ cat .hglf/large1
299 299 ba94c2efe5b7c5e0af8d189295ce00553b0612b7
300 300
301 301 Test a linear merge to a revision containing a normal file with the same name
302 302
303 303 $ hg update -q -C 3
304 304 $ hg remove large2
305 305 $ echo 'large2 as normal file' > large2
306 306 $ hg add large2
307 307 $ echo 'large3 as normal file' > large3
308 308 $ hg add large3
309 309 $ hg commit -m '#5'
310 310 $ hg manifest
311 311 .hglf/large1
312 312 large2
313 313 large3
314 314 normal1
315 315
316 316 (modified largefile is already switched to normal)
317 317
318 318 $ hg update -q -C 2
319 319 $ echo 'modified large2 for linear merge' > large2
320 320 $ hg update -q 5
321 321 local changed .hglf/large2 which remote deleted
322 322 use (c)hanged version or (d)elete? c
323 323 remote turned local largefile large2 into a normal file
324 324 keep (l)argefile or use (n)ormal file? l
325 325 $ hg debugdirstate --nodates | grep large2
326 326 a 0 -1 .hglf/large2
327 327 r 0 0 large2
328 328 $ hg status -A large2
329 329 A large2
330 330 $ cat large2
331 331 modified large2 for linear merge
332 332
333 333 (added largefile is already committed as normal)
334 334
335 335 $ hg update -q -C 2
336 336 $ echo 'large3 as large file for linear merge' > large3
337 337 $ hg add --large large3
338 338 $ hg update -q 5
339 339 remote turned local largefile large3 into a normal file
340 340 keep (l)argefile or use (n)ormal file? l
341 341 $ hg debugdirstate --nodates | grep large3
342 342 a 0 -1 .hglf/large3
343 343 r 0 0 large3
344 344 $ hg status -A large3
345 345 A large3
346 346 $ cat large3
347 347 large3 as large file for linear merge
348 348 $ rm -f large3 .hglf/large3
349 349
350 350 Test that the internal linear merging works correctly
351 351 (both heads are stripped to keep pairing of revision number and commit log)
352 352
353 353 $ hg update -q -C 2
354 354 $ hg strip 3 4
355 355 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/9530e27857f7-backup.hg (glob)
356 356 $ mv .hg/strip-backup/9530e27857f7-backup.hg $TESTTMP
357 357
358 358 (internal linear merging at "hg pull --update")
359 359
360 360 $ echo 'large1 for linear merge (conflict)' > large1
361 361 $ echo 'large2 for linear merge (conflict with normal file)' > large2
362 362 $ hg pull --update --config debug.dirstate.delaywrite=2 $TESTTMP/9530e27857f7-backup.hg
363 363 pulling from $TESTTMP/9530e27857f7-backup.hg (glob)
364 364 searching for changes
365 365 adding changesets
366 366 adding manifests
367 367 adding file changes
368 368 added 3 changesets with 5 changes to 5 files
369 369 local changed .hglf/large2 which remote deleted
370 370 use (c)hanged version or (d)elete? c
371 371 remote turned local largefile large2 into a normal file
372 372 keep (l)argefile or use (n)ormal file? l
373 373 largefile large1 has a merge conflict
374 374 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
375 375 keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or
376 376 take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b? l
377 377 2 files updated, 1 files merged, 0 files removed, 0 files unresolved
378 378
379 379 $ hg status -A large1
380 380 M large1
381 381 $ cat large1
382 382 large1 for linear merge (conflict)
383 383 $ cat .hglf/large1
384 384 ba94c2efe5b7c5e0af8d189295ce00553b0612b7
385 385 $ hg status -A large2
386 386 A large2
387 387 $ cat large2
388 388 large2 for linear merge (conflict with normal file)
389 389 $ cat .hglf/large2
390 390 d7591fe9be0f6227d90bddf3e4f52ff41fc1f544
391 391
392 392 (internal linear merging at "hg unbundle --update")
393 393
394 394 $ hg update -q -C 2
395 395 $ hg rollback -q
396 396
397 397 $ echo 'large1 for linear merge (conflict)' > large1
398 398 $ echo 'large2 for linear merge (conflict with normal file)' > large2
399 399 $ hg unbundle --update --config debug.dirstate.delaywrite=2 $TESTTMP/9530e27857f7-backup.hg
400 400 adding changesets
401 401 adding manifests
402 402 adding file changes
403 403 added 3 changesets with 5 changes to 5 files
404 404 local changed .hglf/large2 which remote deleted
405 405 use (c)hanged version or (d)elete? c
406 406 remote turned local largefile large2 into a normal file
407 407 keep (l)argefile or use (n)ormal file? l
408 408 largefile large1 has a merge conflict
409 409 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
410 410 keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or
411 411 take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b? l
412 412 2 files updated, 1 files merged, 0 files removed, 0 files unresolved
413 413
414 414 $ hg status -A large1
415 415 M large1
416 416 $ cat large1
417 417 large1 for linear merge (conflict)
418 418 $ cat .hglf/large1
419 419 ba94c2efe5b7c5e0af8d189295ce00553b0612b7
420 420 $ hg status -A large2
421 421 A large2
422 422 $ cat large2
423 423 large2 for linear merge (conflict with normal file)
424 424 $ cat .hglf/large2
425 425 d7591fe9be0f6227d90bddf3e4f52ff41fc1f544
426 426
427 427 (internal linear merging in subrepo at "hg update")
428 428
429 429 $ cd ..
430 430 $ hg init subparent
431 431 $ cd subparent
432 432
433 433 $ hg clone -q -u 2 ../repo sub
434 434 $ cat > .hgsub <<EOF
435 435 > sub = sub
436 436 > EOF
437 437 $ hg add .hgsub
438 438 $ hg commit -m '#0@parent'
439 439 $ cat .hgsubstate
440 440 f74e50bd9e5594b7cf1e6c5cbab86ddd25f3ca2f sub
441 441 $ hg -R sub update -q
442 442 $ hg commit -m '#1@parent'
443 443 $ cat .hgsubstate
444 444 d65e59e952a9638e2ce863b41a420ca723dd3e8d sub
445 445 $ hg update -q 0
446 446
447 447 $ echo 'large1 for linear merge (conflict)' > sub/large1
448 448 $ echo 'large2 for linear merge (conflict with normal file)' > sub/large2
449 449 $ hg update --config ui.interactive=True --config debug.dirstate.delaywrite=2 <<EOF
450 450 > m
451 451 > r
452 452 > c
453 453 > l
454 454 > l
455 455 > EOF
456 456 subrepository sub diverged (local revision: f74e50bd9e55, remote revision: d65e59e952a9)
457 457 (M)erge, keep (l)ocal or keep (r)emote? m
458 458 subrepository sources for sub differ (in checked out version)
459 459 use (l)ocal source (f74e50bd9e55) or (r)emote source (d65e59e952a9)? r
460 460 local changed .hglf/large2 which remote deleted
461 461 use (c)hanged version or (d)elete? c
462 462 remote turned local largefile large2 into a normal file
463 463 keep (l)argefile or use (n)ormal file? l
464 464 largefile large1 has a merge conflict
465 465 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
466 466 keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or
467 467 take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b? l
468 468 2 files updated, 1 files merged, 0 files removed, 0 files unresolved
469 469 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
470 470
471 471 $ hg -R sub status -A sub/large1
472 472 M sub/large1
473 473 $ cat sub/large1
474 474 large1 for linear merge (conflict)
475 475 $ cat sub/.hglf/large1
476 476 ba94c2efe5b7c5e0af8d189295ce00553b0612b7
477 477 $ hg -R sub status -A sub/large2
478 478 A sub/large2
479 479 $ cat sub/large2
480 480 large2 for linear merge (conflict with normal file)
481 481 $ cat sub/.hglf/large2
482 482 d7591fe9be0f6227d90bddf3e4f52ff41fc1f544
483 483
484 484 $ cd ..
485 485 $ cd repo
486 486
487 487 Test that rebase updates largefiles in the working directory even if
488 488 it is aborted by a conflict.
489 489
490 490 $ hg update -q -C 3
491 491 $ cat .hglf/large1
492 492 e5bb990443d6a92aaf7223813720f7566c9dd05b
493 493 $ cat large1
494 494 large1 in #3
495 495 $ hg rebase -s 1 -d 3 --keep --config ui.interactive=True <<EOF
496 496 > o
497 497 > EOF
498 498 largefile large1 has a merge conflict
499 499 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
500 500 keep (l)ocal e5bb990443d6a92aaf7223813720f7566c9dd05b or
501 501 take (o)ther 58e24f733a964da346e2407a2bee99d9001184f5? o
502 502 merging normal1
503 503 warning: conflicts during merge.
504 504 merging normal1 incomplete! (edit conflicts, then use 'hg resolve --mark')
505 505 unresolved conflicts (see hg resolve, then hg rebase --continue)
506 506 [1]
507 507 $ cat .hglf/large1
508 508 58e24f733a964da346e2407a2bee99d9001184f5
509 509 $ cat large1
510 510 large1 in #1
511 511
512 512 Test that rebase updates standins for manually modified largefiles at
513 513 the first commit after resuming.
514 514
515 515 $ echo "manually modified before 'hg rebase --continue'" > large1
516 516 $ hg resolve -m normal1
517 517 (no more unresolved files)
518 518 $ hg rebase --continue --config ui.interactive=True <<EOF
519 519 > c
520 520 > EOF
521 521 local changed .hglf/large1 which remote deleted
522 522 use (c)hanged version or (d)elete? c
523 523
524 524 $ hg diff -c "tip~1" --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
525 525 -e5bb990443d6a92aaf7223813720f7566c9dd05b
526 526 +8a4f783556e7dea21139ca0466eafce954c75c13
527 527 $ rm -f large1
528 528 $ hg update -q -C tip
529 529 $ cat large1
530 530 manually modified before 'hg rebase --continue'
531 531
532 532 Test that transplant updates largefiles whose standins are safely
533 533 changed, even if it is aborted by a conflict in another file.
534 534
535 535 $ hg update -q -C 5
536 536 $ cat .hglf/large1
537 537 e5bb990443d6a92aaf7223813720f7566c9dd05b
538 538 $ cat large1
539 539 large1 in #3
540 540 $ hg diff -c 4 .hglf/largeX | grep '^[+-][0-9a-z]'
541 541 +fa44618ea25181aff4f48b70428294790cec9f61
542 542 $ hg transplant 4
543 543 applying 07d6153b5c04
544 544 patching file .hglf/large1
545 545 Hunk #1 FAILED at 0
546 546 1 out of 1 hunks FAILED -- saving rejects to file .hglf/large1.rej
547 547 patch failed to apply
548 548 abort: fix up the merge and run hg transplant --continue
549 549 [255]
550 550 $ hg status -A large1
551 551 C large1
552 552 $ cat .hglf/large1
553 553 e5bb990443d6a92aaf7223813720f7566c9dd05b
554 554 $ cat large1
555 555 large1 in #3
556 556 $ hg status -A largeX
557 557 A largeX
558 558 $ cat .hglf/largeX
559 559 fa44618ea25181aff4f48b70428294790cec9f61
560 560 $ cat largeX
561 561 largeX
562 562
563 563 Test that transplant updates standins for manually modified largefiles
564 564 at the first commit after resuming.
565 565
566 566 $ echo "manually modified before 'hg transplant --continue'" > large1
567 567 $ hg transplant --continue
568 568 07d6153b5c04 transplanted as f1bf30eb88cc
569 569 $ hg diff -c tip .hglf/large1 | grep '^[+-][0-9a-z]'
570 570 -e5bb990443d6a92aaf7223813720f7566c9dd05b
571 571 +6a4f36d4075fbe0f30ec1d26ca44e63c05903671
572 572 $ rm -f large1
573 573 $ hg update -q -C tip
574 574 $ cat large1
575 575 manually modified before 'hg transplant --continue'
576 576
577 577 Test that "hg status" doesn't show removal of largefiles not managed
578 578 in the target context.
579 579
580 580 $ hg update -q -C 4
581 581 $ hg remove largeX
582 582 $ hg status -A largeX
583 583 R largeX
584 584 $ hg status -A --rev '.^1' largeX
585 585
586 586 #if execbit
587 587
588 588 Test that "hg status" against revisions other than the parent notices
589 589 exec bit changes of largefiles.
590 590
591 591 $ hg update -q -C 4
592 592
593 593 (the case where large2 doesn't have the exec bit in the target context
594 594 but does in the working context)
595 595
596 596 $ chmod +x large2
597 597 $ hg status -A --rev 0 large2
598 598 M large2
599 599 $ hg commit -m 'chmod +x large2'
600 600
601 601 (the case where large2 has the exec bit in the target context but not
602 602 in the working context)
603 603
604 604 $ echo dummy > dummy
605 605 $ hg add dummy
606 606 $ hg commit -m 'revision for separation'
607 607 $ chmod -x large2
608 608 $ hg status -A --rev '.^1' large2
609 609 M large2
610 610
611 611 #endif
612 612
613 613 $ cd ..
614
615 Test that "hg convert" avoids copying largefiles from the working
616 directory into the store, because "hg convert" doesn't update largefiles
617 in the working directory (removing files under ".cache/largefiles"
618 forces "hg convert" to copy the corresponding largefiles)
619
620 $ cat >> $HGRCPATH <<EOF
621 > [extensions]
622 > convert =
623 > EOF
624
625 $ rm $TESTTMP/.cache/largefiles/6a4f36d4075fbe0f30ec1d26ca44e63c05903671
626 $ hg convert -q repo repo.converted
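
Throughout this test the files under ".hglf/" are the largefile "standins": they are what Mercurial actually tracks, and each one holds the 40-character SHA-1 hex digest of the corresponding largefile's contents, which is what the repeated "cat .hglf/large1" checks compare against. Below is a rough standalone sketch of computing such a digest; standinhash() is a hypothetical helper written for illustration, not the extension's own lfutil code, and it assumes a standin stores a plain SHA-1 of the file contents.

# Rough sketch: compute the digest a largefiles standin is expected to hold,
# assuming standins store the SHA-1 hex digest of the largefile's contents.
# standinhash() is illustrative only, not part of the largefiles extension.
import hashlib

def standinhash(path):
    sha = hashlib.sha1()
    fp = open(path, 'rb')
    try:
        while True:
            data = fp.read(128 * 1024)  # hash the file in 128 KiB chunks
            if not data:
                break
            sha.update(data)
    finally:
        fp.close()
    return sha.hexdigest()

# After "echo large1 > large1" above, standinhash('large1') should match the
# hash recorded in .hglf/large1 in this test's output.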