##// END OF EJS Templates
merge: restate calculateupdates in terms of a matcher...
Augie Fackler -
r27345:98266b1d default
parent child Browse files
Show More
@@ -1,682 +1,681 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 from mercurial import cmdutil, scmutil, util, commands, obsolete
5 5 from mercurial import repoview, branchmap, merge, copies, error
6 6 import time, os, sys
7 7 import random
8 8 import functools
9 9
10 10 formatteropts = commands.formatteropts
11 11
12 12 cmdtable = {}
13 13 command = cmdutil.command(cmdtable)
14 14
def getlen(ui):
    """Return the length function perf commands should use.

    When the experimental ``perf.stub`` config is set, every collection is
    reported as size 1 so benchmarks finish instantly; otherwise the
    builtin ``len`` is returned.
    """
    if not ui.configbool("perf", "stub"):
        return len
    return lambda x: 1
19 19
def gettimer(ui, opts=None):
    """Return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands.
    """
    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(ui.configint("perf", "presleep", 1))

    if opts is None:
        opts = {}
    # redirect all output to stderr so it does not pollute command output
    ui = ui.copy()
    ui.fout = ui.ferr
    # get a formatter
    fm = ui.formatter('perf', opts)
    # experimental config: perf.stub -- run each body once, no timing loop
    runner = stub_timer if ui.configbool("perf", "stub") else _timer
    return functools.partial(runner, fm), fm
42 42
def stub_timer(fm, func, title=None):
    """Run *func* exactly once and report nothing.

    Drop-in replacement for :func:`_timer` used when ``perf.stub`` is set;
    the formatter and title are accepted for signature compatibility but
    ignored.
    """
    func()
45 45
def _timer(fm, func, title=None):
    """Repeatedly run *func* and report the best observed timing via *fm*.

    The loop runs until at least 3 seconds have elapsed and 100 iterations
    completed, but bails out after 10 seconds once 3 iterations are done.
    Reports wall time, combined/user/system CPU time of the fastest run,
    and the iteration count.
    """
    samples = []
    begin = time.time()
    count = 0
    while True:
        proc0 = os.times()
        wall0 = time.time()
        r = func()
        wall1 = time.time()
        proc1 = os.times()
        count += 1
        # (wall clock, user cpu delta, system cpu delta)
        samples.append((wall1 - wall0,
                        proc1[0] - proc0[0],
                        proc1[1] - proc0[1]))
        elapsed = wall1 - begin
        if elapsed > 3 and count >= 100:
            break
        if elapsed > 10 and count >= 3:
            break

    fm.startitem()

    if title:
        fm.write('title', '! %s\n', title)
    if r:
        fm.write('result', '! result: %s\n', r)
    best = min(samples)
    fm.plain('!')
    fm.write('wall', ' wall %f', best[0])
    fm.write('comb', ' comb %f', best[1] + best[2])
    fm.write('user', ' user %f', best[1])
    fm.write('sys', ' sys %f', best[2])
    fm.write('count', ' (best of %d)', count)
    fm.plain('\n')
78 78
79 79 @command('perfwalk', formatteropts)
80 80 def perfwalk(ui, repo, *pats, **opts):
81 81 timer, fm = gettimer(ui, opts)
82 82 try:
83 83 m = scmutil.match(repo[None], pats, {})
84 84 timer(lambda: len(list(repo.dirstate.walk(m, [], True, False))))
85 85 except Exception:
86 86 try:
87 87 m = scmutil.match(repo[None], pats, {})
88 88 timer(lambda: len([b for a, b, c in repo.dirstate.statwalk([], m)]))
89 89 except Exception:
90 90 timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))
91 91 fm.end()
92 92
93 93 @command('perfannotate', formatteropts)
94 94 def perfannotate(ui, repo, f, **opts):
95 95 timer, fm = gettimer(ui, opts)
96 96 fc = repo['.'][f]
97 97 timer(lambda: len(fc.annotate(True)))
98 98 fm.end()
99 99
100 100 @command('perfstatus',
101 101 [('u', 'unknown', False,
102 102 'ask status to look for unknown files')] + formatteropts)
103 103 def perfstatus(ui, repo, **opts):
104 104 #m = match.always(repo.root, repo.getcwd())
105 105 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
106 106 # False))))
107 107 timer, fm = gettimer(ui, opts)
108 108 timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
109 109 fm.end()
110 110
111 111 @command('perfaddremove', formatteropts)
112 112 def perfaddremove(ui, repo, **opts):
113 113 timer, fm = gettimer(ui, opts)
114 114 try:
115 115 oldquiet = repo.ui.quiet
116 116 repo.ui.quiet = True
117 117 matcher = scmutil.match(repo[None])
118 118 timer(lambda: scmutil.addremove(repo, matcher, "", dry_run=True))
119 119 finally:
120 120 repo.ui.quiet = oldquiet
121 121 fm.end()
122 122
def clearcaches(cl):
    """Drop the lookup caches on changelog/revlog *cl*.

    Behaves consistently across internal API changes: modern revlogs expose
    ``clearcaches()``; older ones only have the private ``_nodecache``,
    which is reset to its initial null-revision state.
    """
    if util.safehasattr(cl, 'clearcaches'):
        cl.clearcaches()
        return
    if util.safehasattr(cl, '_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
131 131
132 132 @command('perfheads', formatteropts)
133 133 def perfheads(ui, repo, **opts):
134 134 timer, fm = gettimer(ui, opts)
135 135 cl = repo.changelog
136 136 def d():
137 137 len(cl.headrevs())
138 138 clearcaches(cl)
139 139 timer(d)
140 140 fm.end()
141 141
142 142 @command('perftags', formatteropts)
143 143 def perftags(ui, repo, **opts):
144 144 import mercurial.changelog
145 145 import mercurial.manifest
146 146 timer, fm = gettimer(ui, opts)
147 147 def t():
148 148 repo.changelog = mercurial.changelog.changelog(repo.svfs)
149 149 repo.manifest = mercurial.manifest.manifest(repo.svfs)
150 150 repo._tags = None
151 151 return len(repo.tags())
152 152 timer(t)
153 153 fm.end()
154 154
155 155 @command('perfancestors', formatteropts)
156 156 def perfancestors(ui, repo, **opts):
157 157 timer, fm = gettimer(ui, opts)
158 158 heads = repo.changelog.headrevs()
159 159 def d():
160 160 for a in repo.changelog.ancestors(heads):
161 161 pass
162 162 timer(d)
163 163 fm.end()
164 164
165 165 @command('perfancestorset', formatteropts)
166 166 def perfancestorset(ui, repo, revset, **opts):
167 167 timer, fm = gettimer(ui, opts)
168 168 revs = repo.revs(revset)
169 169 heads = repo.changelog.headrevs()
170 170 def d():
171 171 s = repo.changelog.ancestors(heads)
172 172 for rev in revs:
173 173 rev in s
174 174 timer(d)
175 175 fm.end()
176 176
177 177 @command('perfdirs', formatteropts)
178 178 def perfdirs(ui, repo, **opts):
179 179 timer, fm = gettimer(ui, opts)
180 180 dirstate = repo.dirstate
181 181 'a' in dirstate
182 182 def d():
183 183 dirstate.dirs()
184 184 del dirstate._dirs
185 185 timer(d)
186 186 fm.end()
187 187
188 188 @command('perfdirstate', formatteropts)
189 189 def perfdirstate(ui, repo, **opts):
190 190 timer, fm = gettimer(ui, opts)
191 191 "a" in repo.dirstate
192 192 def d():
193 193 repo.dirstate.invalidate()
194 194 "a" in repo.dirstate
195 195 timer(d)
196 196 fm.end()
197 197
198 198 @command('perfdirstatedirs', formatteropts)
199 199 def perfdirstatedirs(ui, repo, **opts):
200 200 timer, fm = gettimer(ui, opts)
201 201 "a" in repo.dirstate
202 202 def d():
203 203 "a" in repo.dirstate._dirs
204 204 del repo.dirstate._dirs
205 205 timer(d)
206 206 fm.end()
207 207
208 208 @command('perfdirstatefoldmap', formatteropts)
209 209 def perfdirstatefoldmap(ui, repo, **opts):
210 210 timer, fm = gettimer(ui, opts)
211 211 dirstate = repo.dirstate
212 212 'a' in dirstate
213 213 def d():
214 214 dirstate._filefoldmap.get('a')
215 215 del dirstate._filefoldmap
216 216 timer(d)
217 217 fm.end()
218 218
219 219 @command('perfdirfoldmap', formatteropts)
220 220 def perfdirfoldmap(ui, repo, **opts):
221 221 timer, fm = gettimer(ui, opts)
222 222 dirstate = repo.dirstate
223 223 'a' in dirstate
224 224 def d():
225 225 dirstate._dirfoldmap.get('a')
226 226 del dirstate._dirfoldmap
227 227 del dirstate._dirs
228 228 timer(d)
229 229 fm.end()
230 230
231 231 @command('perfdirstatewrite', formatteropts)
232 232 def perfdirstatewrite(ui, repo, **opts):
233 233 timer, fm = gettimer(ui, opts)
234 234 ds = repo.dirstate
235 235 "a" in ds
236 236 def d():
237 237 ds._dirty = True
238 238 ds.write(repo.currenttransaction())
239 239 timer(d)
240 240 fm.end()
241 241
@command('perfmergecalculate',
         [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """Benchmark merge.calculateupdates for merging *rev* into the wdir."""
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # stat() the working directory files up front so that work is not
    # part of the measured benchmark
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
259 259
260 260 @command('perfpathcopies', [], "REV REV")
261 261 def perfpathcopies(ui, repo, rev1, rev2, **opts):
262 262 timer, fm = gettimer(ui, opts)
263 263 ctx1 = scmutil.revsingle(repo, rev1, rev1)
264 264 ctx2 = scmutil.revsingle(repo, rev2, rev2)
265 265 def d():
266 266 copies.pathcopies(ctx1, ctx2)
267 267 timer(d)
268 268 fm.end()
269 269
270 270 @command('perfmanifest', [], 'REV')
271 271 def perfmanifest(ui, repo, rev, **opts):
272 272 timer, fm = gettimer(ui, opts)
273 273 ctx = scmutil.revsingle(repo, rev, rev)
274 274 t = ctx.manifestnode()
275 275 def d():
276 276 repo.manifest._mancache.clear()
277 277 repo.manifest._cache = None
278 278 repo.manifest.read(t)
279 279 timer(d)
280 280 fm.end()
281 281
282 282 @command('perfchangeset', formatteropts)
283 283 def perfchangeset(ui, repo, rev, **opts):
284 284 timer, fm = gettimer(ui, opts)
285 285 n = repo[rev].node()
286 286 def d():
287 287 repo.changelog.read(n)
288 288 #repo.changelog._cache = None
289 289 timer(d)
290 290 fm.end()
291 291
292 292 @command('perfindex', formatteropts)
293 293 def perfindex(ui, repo, **opts):
294 294 import mercurial.revlog
295 295 timer, fm = gettimer(ui, opts)
296 296 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
297 297 n = repo["tip"].node()
298 298 def d():
299 299 cl = mercurial.revlog.revlog(repo.svfs, "00changelog.i")
300 300 cl.rev(n)
301 301 timer(d)
302 302 fm.end()
303 303
304 304 @command('perfstartup', formatteropts)
305 305 def perfstartup(ui, repo, **opts):
306 306 timer, fm = gettimer(ui, opts)
307 307 cmd = sys.argv[0]
308 308 def d():
309 309 os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
310 310 timer(d)
311 311 fm.end()
312 312
313 313 @command('perfparents', formatteropts)
314 314 def perfparents(ui, repo, **opts):
315 315 timer, fm = gettimer(ui, opts)
316 316 # control the number of commits perfparents iterates over
317 317 # experimental config: perf.parentscount
318 318 count = ui.configint("perf", "parentscount", 1000)
319 319 if len(repo.changelog) < count:
320 320 raise error.Abort("repo needs %d commits for this test" % count)
321 321 repo = repo.unfiltered()
322 322 nl = [repo.changelog.node(i) for i in xrange(count)]
323 323 def d():
324 324 for n in nl:
325 325 repo.changelog.parents(n)
326 326 timer(d)
327 327 fm.end()
328 328
329 329 @command('perfctxfiles', formatteropts)
330 330 def perfctxfiles(ui, repo, x, **opts):
331 331 x = int(x)
332 332 timer, fm = gettimer(ui, opts)
333 333 def d():
334 334 len(repo[x].files())
335 335 timer(d)
336 336 fm.end()
337 337
338 338 @command('perfrawfiles', formatteropts)
339 339 def perfrawfiles(ui, repo, x, **opts):
340 340 x = int(x)
341 341 timer, fm = gettimer(ui, opts)
342 342 cl = repo.changelog
343 343 def d():
344 344 len(cl.read(x)[3])
345 345 timer(d)
346 346 fm.end()
347 347
348 348 @command('perflookup', formatteropts)
349 349 def perflookup(ui, repo, rev, **opts):
350 350 timer, fm = gettimer(ui, opts)
351 351 timer(lambda: len(repo.lookup(rev)))
352 352 fm.end()
353 353
354 354 @command('perfrevrange', formatteropts)
355 355 def perfrevrange(ui, repo, *specs, **opts):
356 356 timer, fm = gettimer(ui, opts)
357 357 revrange = scmutil.revrange
358 358 timer(lambda: len(revrange(repo, specs)))
359 359 fm.end()
360 360
361 361 @command('perfnodelookup', formatteropts)
362 362 def perfnodelookup(ui, repo, rev, **opts):
363 363 timer, fm = gettimer(ui, opts)
364 364 import mercurial.revlog
365 365 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
366 366 n = repo[rev].node()
367 367 cl = mercurial.revlog.revlog(repo.svfs, "00changelog.i")
368 368 def d():
369 369 cl.rev(n)
370 370 clearcaches(cl)
371 371 timer(d)
372 372 fm.end()
373 373
374 374 @command('perflog',
375 375 [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
376 376 def perflog(ui, repo, rev=None, **opts):
377 377 if rev is None:
378 378 rev=[]
379 379 timer, fm = gettimer(ui, opts)
380 380 ui.pushbuffer()
381 381 timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
382 382 copies=opts.get('rename')))
383 383 ui.popbuffer()
384 384 fm.end()
385 385
386 386 @command('perfmoonwalk', formatteropts)
387 387 def perfmoonwalk(ui, repo, **opts):
388 388 """benchmark walking the changelog backwards
389 389
390 390 This also loads the changelog data for each revision in the changelog.
391 391 """
392 392 timer, fm = gettimer(ui, opts)
393 393 def moonwalk():
394 394 for i in xrange(len(repo), -1, -1):
395 395 ctx = repo[i]
396 396 ctx.branch() # read changelog data (in addition to the index)
397 397 timer(moonwalk)
398 398 fm.end()
399 399
400 400 @command('perftemplating', formatteropts)
401 401 def perftemplating(ui, repo, rev=None, **opts):
402 402 if rev is None:
403 403 rev=[]
404 404 timer, fm = gettimer(ui, opts)
405 405 ui.pushbuffer()
406 406 timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
407 407 template='{date|shortdate} [{rev}:{node|short}]'
408 408 ' {author|person}: {desc|firstline}\n'))
409 409 ui.popbuffer()
410 410 fm.end()
411 411
412 412 @command('perfcca', formatteropts)
413 413 def perfcca(ui, repo, **opts):
414 414 timer, fm = gettimer(ui, opts)
415 415 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
416 416 fm.end()
417 417
418 418 @command('perffncacheload', formatteropts)
419 419 def perffncacheload(ui, repo, **opts):
420 420 timer, fm = gettimer(ui, opts)
421 421 s = repo.store
422 422 def d():
423 423 s.fncache._load()
424 424 timer(d)
425 425 fm.end()
426 426
427 427 @command('perffncachewrite', formatteropts)
428 428 def perffncachewrite(ui, repo, **opts):
429 429 timer, fm = gettimer(ui, opts)
430 430 s = repo.store
431 431 s.fncache._load()
432 432 lock = repo.lock()
433 433 tr = repo.transaction('perffncachewrite')
434 434 def d():
435 435 s.fncache._dirty = True
436 436 s.fncache.write(tr)
437 437 timer(d)
438 438 lock.release()
439 439 fm.end()
440 440
441 441 @command('perffncacheencode', formatteropts)
442 442 def perffncacheencode(ui, repo, **opts):
443 443 timer, fm = gettimer(ui, opts)
444 444 s = repo.store
445 445 s.fncache._load()
446 446 def d():
447 447 for p in s.fncache.entries:
448 448 s.encode(p)
449 449 timer(d)
450 450 fm.end()
451 451
@command('perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes

    Times ``hg diff`` on the working directory once per whitespace-option
    combination ('', -w, -b, -B, -wB).
    """
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        # FIX: build the diff options under their own name instead of
        # rebinding ``opts`` -- the original code clobbered the command's
        # **opts parameter on the first loop iteration.
        diffopts = dict((options[c], '1') for c in diffopt)
        # bind via default argument so each timed closure keeps its own
        # options rather than late-binding to the loop variable
        def d(diffopts=diffopts):
            ui.pushbuffer()
            commands.diff(ui, repo, **diffopts)
            ui.popbuffer()
        title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
        timer(d, title)
    fm.end()
471 471
472 472 @command('perfrevlog',
473 473 [('d', 'dist', 100, 'distance between the revisions')] + formatteropts,
474 474 "[INDEXFILE]")
475 475 def perfrevlog(ui, repo, file_, **opts):
476 476 timer, fm = gettimer(ui, opts)
477 477 from mercurial import revlog
478 478 dist = opts['dist']
479 479 _len = getlen(ui)
480 480 def d():
481 481 r = revlog.revlog(lambda fn: open(fn, 'rb'), file_)
482 482 for x in xrange(0, _len(r), dist):
483 483 r.revision(r.node(x))
484 484
485 485 timer(d)
486 486 fm.end()
487 487
488 488 @command('perfrevset',
489 489 [('C', 'clear', False, 'clear volatile cache between each call.'),
490 490 ('', 'contexts', False, 'obtain changectx for each revision')]
491 491 + formatteropts, "REVSET")
492 492 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
493 493 """benchmark the execution time of a revset
494 494
495 495 Use the --clean option if need to evaluate the impact of build volatile
496 496 revisions set cache on the revset execution. Volatile cache hold filtered
497 497 and obsolete related cache."""
498 498 timer, fm = gettimer(ui, opts)
499 499 def d():
500 500 if clear:
501 501 repo.invalidatevolatilesets()
502 502 if contexts:
503 503 for ctx in repo.set(expr): pass
504 504 else:
505 505 for r in repo.revs(expr): pass
506 506 timer(d)
507 507 fm.end()
508 508
509 509 @command('perfvolatilesets', formatteropts)
510 510 def perfvolatilesets(ui, repo, *names, **opts):
511 511 """benchmark the computation of various volatile set
512 512
513 513 Volatile set computes element related to filtering and obsolescence."""
514 514 timer, fm = gettimer(ui, opts)
515 515 repo = repo.unfiltered()
516 516
517 517 def getobs(name):
518 518 def d():
519 519 repo.invalidatevolatilesets()
520 520 obsolete.getrevs(repo, name)
521 521 return d
522 522
523 523 allobs = sorted(obsolete.cachefuncs)
524 524 if names:
525 525 allobs = [n for n in allobs if n in names]
526 526
527 527 for name in allobs:
528 528 timer(getobs(name), title=name)
529 529
530 530 def getfiltered(name):
531 531 def d():
532 532 repo.invalidatevolatilesets()
533 533 repoview.filterrevs(repo, name)
534 534 return d
535 535
536 536 allfilter = sorted(repoview.filtertable)
537 537 if names:
538 538 allfilter = [n for n in allfilter if n in names]
539 539
540 540 for name in allfilter:
541 541 timer(getfiltered(name), title=name)
542 542 fm.end()
543 543
@command('perfbranchmap',
         [('f', 'full', False,
           'Includes build time of subset'),
         ] + formatteropts)
def perfbranchmap(ui, repo, full=False, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if full:
                view._branchcaches.clear()
            else:
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = branchmap.subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, 'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    # add unfiltered
    allfilters.append(None)

    # disable both on-disk paths of the branch cache while timing
    oldread = branchmap.read
    oldwrite = branchmap.branchcache.write
    try:
        branchmap.read = lambda repo: None
        # FIX: the original assigned ``branchmap.write`` -- a module
        # attribute nothing ever calls -- while saving and restoring
        # ``branchmap.branchcache.write``, so cache writes were never
        # actually disabled during the benchmark. Patch the method that
        # is really invoked (branchcache.write(self, repo)).
        branchmap.branchcache.write = lambda self, repo: None
        for name in allfilters:
            timer(getbranchmap(name), title=str(name))
    finally:
        branchmap.read = oldread
        branchmap.branchcache.write = oldwrite
    fm.end()
597 597
598 598 @command('perfloadmarkers')
599 599 def perfloadmarkers(ui, repo):
600 600 """benchmark the time to parse the on-disk markers for a repo
601 601
602 602 Result is the number of markers in the repo."""
603 603 timer, fm = gettimer(ui)
604 604 timer(lambda: len(obsolete.obsstore(repo.svfs)))
605 605 fm.end()
606 606
607 607 @command('perflrucachedict', formatteropts +
608 608 [('', 'size', 4, 'size of cache'),
609 609 ('', 'gets', 10000, 'number of key lookups'),
610 610 ('', 'sets', 10000, 'number of key sets'),
611 611 ('', 'mixed', 10000, 'number of mixed mode operations'),
612 612 ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
613 613 norepo=True)
614 614 def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
615 615 mixedgetfreq=50, **opts):
616 616 def doinit():
617 617 for i in xrange(10000):
618 618 util.lrucachedict(size)
619 619
620 620 values = []
621 621 for i in xrange(size):
622 622 values.append(random.randint(0, sys.maxint))
623 623
624 624 # Get mode fills the cache and tests raw lookup performance with no
625 625 # eviction.
626 626 getseq = []
627 627 for i in xrange(gets):
628 628 getseq.append(random.choice(values))
629 629
630 630 def dogets():
631 631 d = util.lrucachedict(size)
632 632 for v in values:
633 633 d[v] = v
634 634 for key in getseq:
635 635 value = d[key]
636 636 value # silence pyflakes warning
637 637
638 638 # Set mode tests insertion speed with cache eviction.
639 639 setseq = []
640 640 for i in xrange(sets):
641 641 setseq.append(random.randint(0, sys.maxint))
642 642
643 643 def dosets():
644 644 d = util.lrucachedict(size)
645 645 for v in setseq:
646 646 d[v] = v
647 647
648 648 # Mixed mode randomly performs gets and sets with eviction.
649 649 mixedops = []
650 650 for i in xrange(mixed):
651 651 r = random.randint(0, 100)
652 652 if r < mixedgetfreq:
653 653 op = 0
654 654 else:
655 655 op = 1
656 656
657 657 mixedops.append((op, random.randint(0, size * 2)))
658 658
659 659 def domixed():
660 660 d = util.lrucachedict(size)
661 661
662 662 for op, v in mixedops:
663 663 if op == 0:
664 664 try:
665 665 d[v]
666 666 except KeyError:
667 667 pass
668 668 else:
669 669 d[v] = v
670 670
671 671 benches = [
672 672 (doinit, 'init'),
673 673 (dogets, 'gets'),
674 674 (dosets, 'sets'),
675 675 (domixed, 'mixed')
676 676 ]
677 677
678 678 for fn, title in benches:
679 679 timer, fm = gettimer(ui, opts)
680 680 timer(fn, title=title)
681 681 fm.end()
682
@@ -1,626 +1,625 b''
1 1 # hg.py - hg backend for convert extension
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # Notes for hg->hg conversion:
9 9 #
10 10 # * Old versions of Mercurial didn't trim the whitespace from the ends
11 11 # of commit messages, but new versions do. Changesets created by
12 12 # those older versions, then converted, may thus have different
13 13 # hashes for changesets that are otherwise identical.
14 14 #
15 15 # * Using "--config convert.hg.saverev=true" will make the source
16 16 # identifier to be stored in the converted revision. This will cause
17 17 # the converted revision to have a different identity than the
18 18 # source.
19 19
20 20
21 21 import os, time, cStringIO
22 22 from mercurial.i18n import _
23 23 from mercurial.node import bin, hex, nullid
24 24 from mercurial import hg, util, context, bookmarks, error, scmutil, exchange
25 25 from mercurial import phases
26 26 from mercurial import lock as lockmod
27 27 from mercurial import merge as mergemod
28 28
29 29 from common import NoRepo, commit, converter_source, converter_sink, mapfile
30 30
31 31 import re
32 32 sha1re = re.compile(r'\b[0-9a-f]{12,40}\b')
33 33
34 34 class mercurial_sink(converter_sink):
35 35 def __init__(self, ui, path):
36 36 converter_sink.__init__(self, ui, path)
37 37 self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
38 38 self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
39 39 self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
40 40 self.lastbranch = None
41 41 if os.path.isdir(path) and len(os.listdir(path)) > 0:
42 42 try:
43 43 self.repo = hg.repository(self.ui, path)
44 44 if not self.repo.local():
45 45 raise NoRepo(_('%s is not a local Mercurial repository')
46 46 % path)
47 47 except error.RepoError as err:
48 48 ui.traceback()
49 49 raise NoRepo(err.args[0])
50 50 else:
51 51 try:
52 52 ui.status(_('initializing destination %s repository\n') % path)
53 53 self.repo = hg.repository(self.ui, path, create=True)
54 54 if not self.repo.local():
55 55 raise NoRepo(_('%s is not a local Mercurial repository')
56 56 % path)
57 57 self.created.append(path)
58 58 except error.RepoError:
59 59 ui.traceback()
60 60 raise NoRepo(_("could not create hg repository %s as sink")
61 61 % path)
62 62 self.lock = None
63 63 self.wlock = None
64 64 self.filemapmode = False
65 65 self.subrevmaps = {}
66 66
67 67 def before(self):
68 68 self.ui.debug('run hg sink pre-conversion action\n')
69 69 self.wlock = self.repo.wlock()
70 70 self.lock = self.repo.lock()
71 71
72 72 def after(self):
73 73 self.ui.debug('run hg sink post-conversion action\n')
74 74 if self.lock:
75 75 self.lock.release()
76 76 if self.wlock:
77 77 self.wlock.release()
78 78
79 79 def revmapfile(self):
80 80 return self.repo.join("shamap")
81 81
82 82 def authorfile(self):
83 83 return self.repo.join("authormap")
84 84
85 85 def setbranch(self, branch, pbranches):
86 86 if not self.clonebranches:
87 87 return
88 88
89 89 setbranch = (branch != self.lastbranch)
90 90 self.lastbranch = branch
91 91 if not branch:
92 92 branch = 'default'
93 93 pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
94 94 if pbranches:
95 95 pbranch = pbranches[0][1]
96 96 else:
97 97 pbranch = 'default'
98 98
99 99 branchpath = os.path.join(self.path, branch)
100 100 if setbranch:
101 101 self.after()
102 102 try:
103 103 self.repo = hg.repository(self.ui, branchpath)
104 104 except Exception:
105 105 self.repo = hg.repository(self.ui, branchpath, create=True)
106 106 self.before()
107 107
108 108 # pbranches may bring revisions from other branches (merge parents)
109 109 # Make sure we have them, or pull them.
110 110 missings = {}
111 111 for b in pbranches:
112 112 try:
113 113 self.repo.lookup(b[0])
114 114 except Exception:
115 115 missings.setdefault(b[1], []).append(b[0])
116 116
117 117 if missings:
118 118 self.after()
119 119 for pbranch, heads in sorted(missings.iteritems()):
120 120 pbranchpath = os.path.join(self.path, pbranch)
121 121 prepo = hg.peer(self.ui, {}, pbranchpath)
122 122 self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
123 123 exchange.pull(self.repo, prepo,
124 124 [prepo.lookup(h) for h in heads])
125 125 self.before()
126 126
127 127 def _rewritetags(self, source, revmap, data):
128 128 fp = cStringIO.StringIO()
129 129 for line in data.splitlines():
130 130 s = line.split(' ', 1)
131 131 if len(s) != 2:
132 132 continue
133 133 revid = revmap.get(source.lookuprev(s[0]))
134 134 if not revid:
135 135 if s[0] == hex(nullid):
136 136 revid = s[0]
137 137 else:
138 138 continue
139 139 fp.write('%s %s\n' % (revid, s[1]))
140 140 return fp.getvalue()
141 141
142 142 def _rewritesubstate(self, source, data):
143 143 fp = cStringIO.StringIO()
144 144 for line in data.splitlines():
145 145 s = line.split(' ', 1)
146 146 if len(s) != 2:
147 147 continue
148 148
149 149 revid = s[0]
150 150 subpath = s[1]
151 151 if revid != hex(nullid):
152 152 revmap = self.subrevmaps.get(subpath)
153 153 if revmap is None:
154 154 revmap = mapfile(self.ui,
155 155 self.repo.wjoin(subpath, '.hg/shamap'))
156 156 self.subrevmaps[subpath] = revmap
157 157
158 158 # It is reasonable that one or more of the subrepos don't
159 159 # need to be converted, in which case they can be cloned
160 160 # into place instead of converted. Therefore, only warn
161 161 # once.
162 162 msg = _('no ".hgsubstate" updates will be made for "%s"\n')
163 163 if len(revmap) == 0:
164 164 sub = self.repo.wvfs.reljoin(subpath, '.hg')
165 165
166 166 if self.repo.wvfs.exists(sub):
167 167 self.ui.warn(msg % subpath)
168 168
169 169 newid = revmap.get(revid)
170 170 if not newid:
171 171 if len(revmap) > 0:
172 172 self.ui.warn(_("%s is missing from %s/.hg/shamap\n") %
173 173 (revid, subpath))
174 174 else:
175 175 revid = newid
176 176
177 177 fp.write('%s %s\n' % (revid, subpath))
178 178
179 179 return fp.getvalue()
180 180
181 181 def _calculatemergedfiles(self, source, p1ctx, p2ctx):
182 182 """Calculates the files from p2 that we need to pull in when merging p1
183 183 and p2, given that the merge is coming from the given source.
184 184
185 185 This prevents us from losing files that only exist in the target p2 and
186 186 that don't come from the source repo (like if you're merging multiple
187 187 repositories together).
188 188 """
189 189 anc = [p1ctx.ancestor(p2ctx)]
190 190 # Calculate what files are coming from p2
191 191 actions, diverge, rename = mergemod.calculateupdates(
192 192 self.repo, p1ctx, p2ctx, anc,
193 193 True, # branchmerge
194 194 True, # force
195 False, # partial
196 195 False, # acceptremote
197 196 False, # followcopies
198 197 )
199 198
200 199 for file, (action, info, msg) in actions.iteritems():
201 200 if source.targetfilebelongstosource(file):
202 201 # If the file belongs to the source repo, ignore the p2
203 202 # since it will be covered by the existing fileset.
204 203 continue
205 204
206 205 # If the file requires actual merging, abort. We don't have enough
207 206 # context to resolve merges correctly.
208 207 if action in ['m', 'dm', 'cd', 'dc']:
209 208 raise error.Abort(_("unable to convert merge commit "
210 209 "since target parents do not merge cleanly (file "
211 210 "%s, parents %s and %s)") % (file, p1ctx,
212 211 p2ctx))
213 212 elif action == 'k':
214 213 # 'keep' means nothing changed from p1
215 214 continue
216 215 else:
217 216 # Any other change means we want to take the p2 version
218 217 yield file
219 218
220 219 def putcommit(self, files, copies, parents, commit, source, revmap, full,
221 220 cleanp2):
222 221 files = dict(files)
223 222
224 223 def getfilectx(repo, memctx, f):
225 224 if p2ctx and f in p2files and f not in copies:
226 225 self.ui.debug('reusing %s from p2\n' % f)
227 226 try:
228 227 return p2ctx[f]
229 228 except error.ManifestLookupError:
230 229 # If the file doesn't exist in p2, then we're syncing a
231 230 # delete, so just return None.
232 231 return None
233 232 try:
234 233 v = files[f]
235 234 except KeyError:
236 235 return None
237 236 data, mode = source.getfile(f, v)
238 237 if data is None:
239 238 return None
240 239 if f == '.hgtags':
241 240 data = self._rewritetags(source, revmap, data)
242 241 if f == '.hgsubstate':
243 242 data = self._rewritesubstate(source, data)
244 243 return context.memfilectx(self.repo, f, data, 'l' in mode,
245 244 'x' in mode, copies.get(f))
246 245
247 246 pl = []
248 247 for p in parents:
249 248 if p not in pl:
250 249 pl.append(p)
251 250 parents = pl
252 251 nparents = len(parents)
253 252 if self.filemapmode and nparents == 1:
254 253 m1node = self.repo.changelog.read(bin(parents[0]))[0]
255 254 parent = parents[0]
256 255
257 256 if len(parents) < 2:
258 257 parents.append(nullid)
259 258 if len(parents) < 2:
260 259 parents.append(nullid)
261 260 p2 = parents.pop(0)
262 261
263 262 text = commit.desc
264 263
265 264 sha1s = re.findall(sha1re, text)
266 265 for sha1 in sha1s:
267 266 oldrev = source.lookuprev(sha1)
268 267 newrev = revmap.get(oldrev)
269 268 if newrev is not None:
270 269 text = text.replace(sha1, newrev[:len(sha1)])
271 270
272 271 extra = commit.extra.copy()
273 272
274 273 sourcename = self.repo.ui.config('convert', 'hg.sourcename')
275 274 if sourcename:
276 275 extra['convert_source'] = sourcename
277 276
278 277 for label in ('source', 'transplant_source', 'rebase_source',
279 278 'intermediate-source'):
280 279 node = extra.get(label)
281 280
282 281 if node is None:
283 282 continue
284 283
285 284 # Only transplant stores its reference in binary
286 285 if label == 'transplant_source':
287 286 node = hex(node)
288 287
289 288 newrev = revmap.get(node)
290 289 if newrev is not None:
291 290 if label == 'transplant_source':
292 291 newrev = bin(newrev)
293 292
294 293 extra[label] = newrev
295 294
296 295 if self.branchnames and commit.branch:
297 296 extra['branch'] = commit.branch
298 297 if commit.rev and commit.saverev:
299 298 extra['convert_revision'] = commit.rev
300 299
301 300 while parents:
302 301 p1 = p2
303 302 p2 = parents.pop(0)
304 303 p1ctx = self.repo[p1]
305 304 p2ctx = None
306 305 if p2 != nullid:
307 306 p2ctx = self.repo[p2]
308 307 fileset = set(files)
309 308 if full:
310 309 fileset.update(self.repo[p1])
311 310 fileset.update(self.repo[p2])
312 311
313 312 if p2ctx:
314 313 p2files = set(cleanp2)
315 314 for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
316 315 p2files.add(file)
317 316 fileset.add(file)
318 317
319 318 ctx = context.memctx(self.repo, (p1, p2), text, fileset,
320 319 getfilectx, commit.author, commit.date, extra)
321 320
322 321 # We won't know if the conversion changes the node until after the
323 322 # commit, so copy the source's phase for now.
324 323 self.repo.ui.setconfig('phases', 'new-commit',
325 324 phases.phasenames[commit.phase], 'convert')
326 325
327 326 tr = self.repo.transaction("convert")
328 327
329 328 try:
330 329 node = hex(self.repo.commitctx(ctx))
331 330
332 331 # If the node value has changed, but the phase is lower than
333 332 # draft, set it back to draft since it hasn't been exposed
334 333 # anywhere.
335 334 if commit.rev != node:
336 335 ctx = self.repo[node]
337 336 if ctx.phase() < phases.draft:
338 337 phases.retractboundary(self.repo, tr, phases.draft,
339 338 [ctx.node()])
340 339 tr.close()
341 340 finally:
342 341 tr.release()
343 342
344 343 text = "(octopus merge fixup)\n"
345 344 p2 = node
346 345
347 346 if self.filemapmode and nparents == 1:
348 347 man = self.repo.manifest
349 348 mnode = self.repo.changelog.read(bin(p2))[0]
350 349 closed = 'close' in commit.extra
351 350 if not closed and not man.cmp(m1node, man.revision(mnode)):
352 351 self.ui.status(_("filtering out empty revision\n"))
353 352 self.repo.rollback(force=True)
354 353 return parent
355 354 return p2
356 355
357 356 def puttags(self, tags):
358 357 try:
359 358 parentctx = self.repo[self.tagsbranch]
360 359 tagparent = parentctx.node()
361 360 except error.RepoError:
362 361 parentctx = None
363 362 tagparent = nullid
364 363
365 364 oldlines = set()
366 365 for branch, heads in self.repo.branchmap().iteritems():
367 366 for h in heads:
368 367 if '.hgtags' in self.repo[h]:
369 368 oldlines.update(
370 369 set(self.repo[h]['.hgtags'].data().splitlines(True)))
371 370 oldlines = sorted(list(oldlines))
372 371
373 372 newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
374 373 if newlines == oldlines:
375 374 return None, None
376 375
377 376 # if the old and new tags match, then there is nothing to update
378 377 oldtags = set()
379 378 newtags = set()
380 379 for line in oldlines:
381 380 s = line.strip().split(' ', 1)
382 381 if len(s) != 2:
383 382 continue
384 383 oldtags.add(s[1])
385 384 for line in newlines:
386 385 s = line.strip().split(' ', 1)
387 386 if len(s) != 2:
388 387 continue
389 388 if s[1] not in oldtags:
390 389 newtags.add(s[1].strip())
391 390
392 391 if not newtags:
393 392 return None, None
394 393
395 394 data = "".join(newlines)
396 395 def getfilectx(repo, memctx, f):
397 396 return context.memfilectx(repo, f, data, False, False, None)
398 397
399 398 self.ui.status(_("updating tags\n"))
400 399 date = "%s 0" % int(time.mktime(time.gmtime()))
401 400 extra = {'branch': self.tagsbranch}
402 401 ctx = context.memctx(self.repo, (tagparent, None), "update tags",
403 402 [".hgtags"], getfilectx, "convert-repo", date,
404 403 extra)
405 404 node = self.repo.commitctx(ctx)
406 405 return hex(node), hex(tagparent)
407 406
408 407 def setfilemapmode(self, active):
409 408 self.filemapmode = active
410 409
411 410 def putbookmarks(self, updatedbookmark):
412 411 if not len(updatedbookmark):
413 412 return
414 413 wlock = lock = tr = None
415 414 try:
416 415 wlock = self.repo.wlock()
417 416 lock = self.repo.lock()
418 417 tr = self.repo.transaction('bookmark')
419 418 self.ui.status(_("updating bookmarks\n"))
420 419 destmarks = self.repo._bookmarks
421 420 for bookmark in updatedbookmark:
422 421 destmarks[bookmark] = bin(updatedbookmark[bookmark])
423 422 destmarks.recordchange(tr)
424 423 tr.close()
425 424 finally:
426 425 lockmod.release(lock, wlock, tr)
427 426
428 427 def hascommitfrommap(self, rev):
429 428 # the exact semantics of clonebranches is unclear so we can't say no
430 429 return rev in self.repo or self.clonebranches
431 430
432 431 def hascommitforsplicemap(self, rev):
433 432 if rev not in self.repo and self.clonebranches:
434 433 raise error.Abort(_('revision %s not found in destination '
435 434 'repository (lookups with clonebranches=true '
436 435 'are not implemented)') % rev)
437 436 return rev in self.repo
438 437
439 438 class mercurial_source(converter_source):
440 439 def __init__(self, ui, path, revs=None):
441 440 converter_source.__init__(self, ui, path, revs)
442 441 self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
443 442 self.ignored = set()
444 443 self.saverev = ui.configbool('convert', 'hg.saverev', False)
445 444 try:
446 445 self.repo = hg.repository(self.ui, path)
447 446 # try to provoke an exception if this isn't really a hg
448 447 # repo, but some other bogus compatible-looking url
449 448 if not self.repo.local():
450 449 raise error.RepoError
451 450 except error.RepoError:
452 451 ui.traceback()
453 452 raise NoRepo(_("%s is not a local Mercurial repository") % path)
454 453 self.lastrev = None
455 454 self.lastctx = None
456 455 self._changescache = None, None
457 456 self.convertfp = None
458 457 # Restrict converted revisions to startrev descendants
459 458 startnode = ui.config('convert', 'hg.startrev')
460 459 hgrevs = ui.config('convert', 'hg.revs')
461 460 if hgrevs is None:
462 461 if startnode is not None:
463 462 try:
464 463 startnode = self.repo.lookup(startnode)
465 464 except error.RepoError:
466 465 raise error.Abort(_('%s is not a valid start revision')
467 466 % startnode)
468 467 startrev = self.repo.changelog.rev(startnode)
469 468 children = {startnode: 1}
470 469 for r in self.repo.changelog.descendants([startrev]):
471 470 children[self.repo.changelog.node(r)] = 1
472 471 self.keep = children.__contains__
473 472 else:
474 473 self.keep = util.always
475 474 if revs:
476 475 self._heads = [self.repo[r].node() for r in revs]
477 476 else:
478 477 self._heads = self.repo.heads()
479 478 else:
480 479 if revs or startnode is not None:
481 480 raise error.Abort(_('hg.revs cannot be combined with '
482 481 'hg.startrev or --rev'))
483 482 nodes = set()
484 483 parents = set()
485 484 for r in scmutil.revrange(self.repo, [hgrevs]):
486 485 ctx = self.repo[r]
487 486 nodes.add(ctx.node())
488 487 parents.update(p.node() for p in ctx.parents())
489 488 self.keep = nodes.__contains__
490 489 self._heads = nodes - parents
491 490
492 491 def changectx(self, rev):
493 492 if self.lastrev != rev:
494 493 self.lastctx = self.repo[rev]
495 494 self.lastrev = rev
496 495 return self.lastctx
497 496
498 497 def parents(self, ctx):
499 498 return [p for p in ctx.parents() if p and self.keep(p.node())]
500 499
501 500 def getheads(self):
502 501 return [hex(h) for h in self._heads if self.keep(h)]
503 502
504 503 def getfile(self, name, rev):
505 504 try:
506 505 fctx = self.changectx(rev)[name]
507 506 return fctx.data(), fctx.flags()
508 507 except error.LookupError:
509 508 return None, None
510 509
511 510 def getchanges(self, rev, full):
512 511 ctx = self.changectx(rev)
513 512 parents = self.parents(ctx)
514 513 if full or not parents:
515 514 files = copyfiles = ctx.manifest()
516 515 if parents:
517 516 if self._changescache[0] == rev:
518 517 m, a, r = self._changescache[1]
519 518 else:
520 519 m, a, r = self.repo.status(parents[0].node(), ctx.node())[:3]
521 520 if not full:
522 521 files = m + a + r
523 522 copyfiles = m + a
524 523 # getcopies() is also run for roots and before filtering so missing
525 524 # revlogs are detected early
526 525 copies = self.getcopies(ctx, parents, copyfiles)
527 526 cleanp2 = set()
528 527 if len(parents) == 2:
529 528 cleanp2.update(self.repo.status(parents[1].node(), ctx.node(),
530 529 clean=True).clean)
531 530 changes = [(f, rev) for f in files if f not in self.ignored]
532 531 changes.sort()
533 532 return changes, copies, cleanp2
534 533
535 534 def getcopies(self, ctx, parents, files):
536 535 copies = {}
537 536 for name in files:
538 537 if name in self.ignored:
539 538 continue
540 539 try:
541 540 copysource, _copynode = ctx.filectx(name).renamed()
542 541 if copysource in self.ignored:
543 542 continue
544 543 # Ignore copy sources not in parent revisions
545 544 found = False
546 545 for p in parents:
547 546 if copysource in p:
548 547 found = True
549 548 break
550 549 if not found:
551 550 continue
552 551 copies[name] = copysource
553 552 except TypeError:
554 553 pass
555 554 except error.LookupError as e:
556 555 if not self.ignoreerrors:
557 556 raise
558 557 self.ignored.add(name)
559 558 self.ui.warn(_('ignoring: %s\n') % e)
560 559 return copies
561 560
562 561 def getcommit(self, rev):
563 562 ctx = self.changectx(rev)
564 563 parents = [p.hex() for p in self.parents(ctx)]
565 564 crev = rev
566 565
567 566 return commit(author=ctx.user(),
568 567 date=util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'),
569 568 desc=ctx.description(), rev=crev, parents=parents,
570 569 branch=ctx.branch(), extra=ctx.extra(),
571 570 sortkey=ctx.rev(), saverev=self.saverev,
572 571 phase=ctx.phase())
573 572
574 573 def gettags(self):
575 574 # This will get written to .hgtags, filter non global tags out.
576 575 tags = [t for t in self.repo.tagslist()
577 576 if self.repo.tagtype(t[0]) == 'global']
578 577 return dict([(name, hex(node)) for name, node in tags
579 578 if self.keep(node)])
580 579
581 580 def getchangedfiles(self, rev, i):
582 581 ctx = self.changectx(rev)
583 582 parents = self.parents(ctx)
584 583 if not parents and i is None:
585 584 i = 0
586 585 changes = [], ctx.manifest().keys(), []
587 586 else:
588 587 i = i or 0
589 588 changes = self.repo.status(parents[i].node(), ctx.node())[:3]
590 589 changes = [[f for f in l if f not in self.ignored] for l in changes]
591 590
592 591 if i == 0:
593 592 self._changescache = (rev, changes)
594 593
595 594 return changes[0] + changes[1] + changes[2]
596 595
597 596 def converted(self, rev, destrev):
598 597 if self.convertfp is None:
599 598 self.convertfp = open(self.repo.join('shamap'), 'a')
600 599 self.convertfp.write('%s %s\n' % (destrev, rev))
601 600 self.convertfp.flush()
602 601
603 602 def before(self):
604 603 self.ui.debug('run hg source pre-conversion action\n')
605 604
606 605 def after(self):
607 606 self.ui.debug('run hg source post-conversion action\n')
608 607
609 608 def hasnativeorder(self):
610 609 return True
611 610
612 611 def hasnativeclose(self):
613 612 return True
614 613
615 614 def lookuprev(self, rev):
616 615 try:
617 616 return hex(self.repo.lookup(rev))
618 617 except (error.RepoError, error.LookupError):
619 618 return None
620 619
621 620 def getbookmarks(self):
622 621 return bookmarks.listbookmarks(self.repo)
623 622
624 623 def checkrevformat(self, revstr, mapname='splicemap'):
625 624 """ Mercurial, revision string is a 40 byte hex """
626 625 self.checkhexformat(revstr, mapname)
@@ -1,1433 +1,1433 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 15 archival, pathutil, revset, error
16 16 from mercurial.i18n import _
17 17
18 18 import lfutil
19 19 import lfcommands
20 20 import basestore
21 21
22 22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 23
24 24 def composelargefilematcher(match, manifest):
25 25 '''create a matcher that matches only the largefiles in the original
26 26 matcher'''
27 27 m = copy.copy(match)
28 28 lfile = lambda f: lfutil.standin(f) in manifest
29 29 m._files = filter(lfile, m._files)
30 30 m._fileroots = set(m._files)
31 31 m._always = False
32 32 origmatchfn = m.matchfn
33 33 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
34 34 return m
35 35
36 36 def composenormalfilematcher(match, manifest, exclude=None):
37 37 excluded = set()
38 38 if exclude is not None:
39 39 excluded.update(exclude)
40 40
41 41 m = copy.copy(match)
42 42 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
43 43 manifest or f in excluded)
44 44 m._files = filter(notlfile, m._files)
45 45 m._fileroots = set(m._files)
46 46 m._always = False
47 47 origmatchfn = m.matchfn
48 48 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
49 49 return m
50 50
51 51 def installnormalfilesmatchfn(manifest):
52 52 '''installmatchfn with a matchfn that ignores all largefiles'''
53 53 def overridematch(ctx, pats=(), opts=None, globbed=False,
54 54 default='relpath', badfn=None):
55 55 if opts is None:
56 56 opts = {}
57 57 match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
58 58 return composenormalfilematcher(match, manifest)
59 59 oldmatch = installmatchfn(overridematch)
60 60
61 61 def installmatchfn(f):
62 62 '''monkey patch the scmutil module with a custom match function.
63 63 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
64 64 oldmatch = scmutil.match
65 65 setattr(f, 'oldmatch', oldmatch)
66 66 scmutil.match = f
67 67 return oldmatch
68 68
69 69 def restorematchfn():
70 70 '''restores scmutil.match to what it was before installmatchfn
71 71 was called. no-op if scmutil.match is its original function.
72 72
73 73 Note that n calls to installmatchfn will require n calls to
74 74 restore the original matchfn.'''
75 75 scmutil.match = getattr(scmutil.match, 'oldmatch')
76 76
77 77 def installmatchandpatsfn(f):
78 78 oldmatchandpats = scmutil.matchandpats
79 79 setattr(f, 'oldmatchandpats', oldmatchandpats)
80 80 scmutil.matchandpats = f
81 81 return oldmatchandpats
82 82
83 83 def restorematchandpatsfn():
84 84 '''restores scmutil.matchandpats to what it was before
85 85 installmatchandpatsfn was called. No-op if scmutil.matchandpats
86 86 is its original function.
87 87
88 88 Note that n calls to installmatchandpatsfn will require n calls
89 89 to restore the original matchfn.'''
90 90 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
91 91 scmutil.matchandpats)
92 92
93 93 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
94 94 large = opts.get('large')
95 95 lfsize = lfutil.getminsize(
96 96 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
97 97
98 98 lfmatcher = None
99 99 if lfutil.islfilesrepo(repo):
100 100 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
101 101 if lfpats:
102 102 lfmatcher = match_.match(repo.root, '', list(lfpats))
103 103
104 104 lfnames = []
105 105 m = matcher
106 106
107 107 wctx = repo[None]
108 108 for f in repo.walk(match_.badmatch(m, lambda x, y: None)):
109 109 exact = m.exact(f)
110 110 lfile = lfutil.standin(f) in wctx
111 111 nfile = f in wctx
112 112 exists = lfile or nfile
113 113
114 114 # addremove in core gets fancy with the name, add doesn't
115 115 if isaddremove:
116 116 name = m.uipath(f)
117 117 else:
118 118 name = m.rel(f)
119 119
120 120 # Don't warn the user when they attempt to add a normal tracked file.
121 121 # The normal add code will do that for us.
122 122 if exact and exists:
123 123 if lfile:
124 124 ui.warn(_('%s already a largefile\n') % name)
125 125 continue
126 126
127 127 if (exact or not exists) and not lfutil.isstandin(f):
128 128 # In case the file was removed previously, but not committed
129 129 # (issue3507)
130 130 if not repo.wvfs.exists(f):
131 131 continue
132 132
133 133 abovemin = (lfsize and
134 134 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
135 135 if large or abovemin or (lfmatcher and lfmatcher(f)):
136 136 lfnames.append(f)
137 137 if ui.verbose or not exact:
138 138 ui.status(_('adding %s as a largefile\n') % name)
139 139
140 140 bad = []
141 141
142 142 # Need to lock, otherwise there could be a race condition between
143 143 # when standins are created and added to the repo.
144 144 wlock = repo.wlock()
145 145 try:
146 146 if not opts.get('dry_run'):
147 147 standins = []
148 148 lfdirstate = lfutil.openlfdirstate(ui, repo)
149 149 for f in lfnames:
150 150 standinname = lfutil.standin(f)
151 151 lfutil.writestandin(repo, standinname, hash='',
152 152 executable=lfutil.getexecutable(repo.wjoin(f)))
153 153 standins.append(standinname)
154 154 if lfdirstate[f] == 'r':
155 155 lfdirstate.normallookup(f)
156 156 else:
157 157 lfdirstate.add(f)
158 158 lfdirstate.write()
159 159 bad += [lfutil.splitstandin(f)
160 160 for f in repo[None].add(standins)
161 161 if f in m.files()]
162 162
163 163 added = [f for f in lfnames if f not in bad]
164 164 finally:
165 165 wlock.release()
166 166 return added, bad
167 167
168 168 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
169 169 after = opts.get('after')
170 170 m = composelargefilematcher(matcher, repo[None].manifest())
171 171 try:
172 172 repo.lfstatus = True
173 173 s = repo.status(match=m, clean=not isaddremove)
174 174 finally:
175 175 repo.lfstatus = False
176 176 manifest = repo[None].manifest()
177 177 modified, added, deleted, clean = [[f for f in list
178 178 if lfutil.standin(f) in manifest]
179 179 for list in (s.modified, s.added,
180 180 s.deleted, s.clean)]
181 181
182 182 def warn(files, msg):
183 183 for f in files:
184 184 ui.warn(msg % m.rel(f))
185 185 return int(len(files) > 0)
186 186
187 187 result = 0
188 188
189 189 if after:
190 190 remove = deleted
191 191 result = warn(modified + added + clean,
192 192 _('not removing %s: file still exists\n'))
193 193 else:
194 194 remove = deleted + clean
195 195 result = warn(modified, _('not removing %s: file is modified (use -f'
196 196 ' to force removal)\n'))
197 197 result = warn(added, _('not removing %s: file has been marked for add'
198 198 ' (use forget to undo)\n')) or result
199 199
200 200 # Need to lock because standin files are deleted then removed from the
201 201 # repository and we could race in-between.
202 202 wlock = repo.wlock()
203 203 try:
204 204 lfdirstate = lfutil.openlfdirstate(ui, repo)
205 205 for f in sorted(remove):
206 206 if ui.verbose or not m.exact(f):
207 207 # addremove in core gets fancy with the name, remove doesn't
208 208 if isaddremove:
209 209 name = m.uipath(f)
210 210 else:
211 211 name = m.rel(f)
212 212 ui.status(_('removing %s\n') % name)
213 213
214 214 if not opts.get('dry_run'):
215 215 if not after:
216 216 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
217 217
218 218 if opts.get('dry_run'):
219 219 return result
220 220
221 221 remove = [lfutil.standin(f) for f in remove]
222 222 # If this is being called by addremove, let the original addremove
223 223 # function handle this.
224 224 if not isaddremove:
225 225 for f in remove:
226 226 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
227 227 repo[None].forget(remove)
228 228
229 229 for f in remove:
230 230 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
231 231 False)
232 232
233 233 lfdirstate.write()
234 234 finally:
235 235 wlock.release()
236 236
237 237 return result
238 238
239 239 # For overriding mercurial.hgweb.webcommands so that largefiles will
240 240 # appear at their right place in the manifests.
241 241 def decodepath(orig, path):
242 242 return lfutil.splitstandin(path) or path
243 243
244 244 # -- Wrappers: modify existing commands --------------------------------
245 245
246 246 def overrideadd(orig, ui, repo, *pats, **opts):
247 247 if opts.get('normal') and opts.get('large'):
248 248 raise error.Abort(_('--normal cannot be used with --large'))
249 249 return orig(ui, repo, *pats, **opts)
250 250
251 251 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
252 252 # The --normal flag short circuits this override
253 253 if opts.get('normal'):
254 254 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
255 255
256 256 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
257 257 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
258 258 ladded)
259 259 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
260 260
261 261 bad.extend(f for f in lbad)
262 262 return bad
263 263
264 264 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
265 265 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
266 266 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
267 267 return removelargefiles(ui, repo, False, matcher, after=after,
268 268 force=force) or result
269 269
270 270 def overridestatusfn(orig, repo, rev2, **opts):
271 271 try:
272 272 repo._repo.lfstatus = True
273 273 return orig(repo, rev2, **opts)
274 274 finally:
275 275 repo._repo.lfstatus = False
276 276
277 277 def overridestatus(orig, ui, repo, *pats, **opts):
278 278 try:
279 279 repo.lfstatus = True
280 280 return orig(ui, repo, *pats, **opts)
281 281 finally:
282 282 repo.lfstatus = False
283 283
284 284 def overridedirty(orig, repo, ignoreupdate=False):
285 285 try:
286 286 repo._repo.lfstatus = True
287 287 return orig(repo, ignoreupdate)
288 288 finally:
289 289 repo._repo.lfstatus = False
290 290
291 291 def overridelog(orig, ui, repo, *pats, **opts):
292 292 def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
293 293 default='relpath', badfn=None):
294 294 """Matcher that merges root directory with .hglf, suitable for log.
295 295 It is still possible to match .hglf directly.
296 296 For any listed files run log on the standin too.
297 297 matchfn tries both the given filename and with .hglf stripped.
298 298 """
299 299 if opts is None:
300 300 opts = {}
301 301 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
302 302 badfn=badfn)
303 303 m, p = copy.copy(matchandpats)
304 304
305 305 if m.always():
306 306 # We want to match everything anyway, so there's no benefit trying
307 307 # to add standins.
308 308 return matchandpats
309 309
310 310 pats = set(p)
311 311
312 312 def fixpats(pat, tostandin=lfutil.standin):
313 313 if pat.startswith('set:'):
314 314 return pat
315 315
316 316 kindpat = match_._patsplit(pat, None)
317 317
318 318 if kindpat[0] is not None:
319 319 return kindpat[0] + ':' + tostandin(kindpat[1])
320 320 return tostandin(kindpat[1])
321 321
322 322 if m._cwd:
323 323 hglf = lfutil.shortname
324 324 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
325 325
326 326 def tostandin(f):
327 327 # The file may already be a standin, so truncate the back
328 328 # prefix and test before mangling it. This avoids turning
329 329 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
330 330 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
331 331 return f
332 332
333 333 # An absolute path is from outside the repo, so truncate the
334 334 # path to the root before building the standin. Otherwise cwd
335 335 # is somewhere in the repo, relative to root, and needs to be
336 336 # prepended before building the standin.
337 337 if os.path.isabs(m._cwd):
338 338 f = f[len(back):]
339 339 else:
340 340 f = m._cwd + '/' + f
341 341 return back + lfutil.standin(f)
342 342
343 343 pats.update(fixpats(f, tostandin) for f in p)
344 344 else:
345 345 def tostandin(f):
346 346 if lfutil.splitstandin(f):
347 347 return f
348 348 return lfutil.standin(f)
349 349 pats.update(fixpats(f, tostandin) for f in p)
350 350
351 351 for i in range(0, len(m._files)):
352 352 # Don't add '.hglf' to m.files, since that is already covered by '.'
353 353 if m._files[i] == '.':
354 354 continue
355 355 standin = lfutil.standin(m._files[i])
356 356 # If the "standin" is a directory, append instead of replace to
357 357 # support naming a directory on the command line with only
358 358 # largefiles. The original directory is kept to support normal
359 359 # files.
360 360 if standin in repo[ctx.node()]:
361 361 m._files[i] = standin
362 362 elif m._files[i] not in repo[ctx.node()] \
363 363 and repo.wvfs.isdir(standin):
364 364 m._files.append(standin)
365 365
366 366 m._fileroots = set(m._files)
367 367 m._always = False
368 368 origmatchfn = m.matchfn
369 369 def lfmatchfn(f):
370 370 lf = lfutil.splitstandin(f)
371 371 if lf is not None and origmatchfn(lf):
372 372 return True
373 373 r = origmatchfn(f)
374 374 return r
375 375 m.matchfn = lfmatchfn
376 376
377 377 ui.debug('updated patterns: %s\n' % sorted(pats))
378 378 return m, pats
379 379
380 380 # For hg log --patch, the match object is used in two different senses:
381 381 # (1) to determine what revisions should be printed out, and
382 382 # (2) to determine what files to print out diffs for.
383 383 # The magic matchandpats override should be used for case (1) but not for
384 384 # case (2).
385 385 def overridemakelogfilematcher(repo, pats, opts, badfn=None):
386 386 wctx = repo[None]
387 387 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
388 388 return lambda rev: match
389 389
390 390 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
391 391 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
392 392 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
393 393
394 394 try:
395 395 return orig(ui, repo, *pats, **opts)
396 396 finally:
397 397 restorematchandpatsfn()
398 398 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
399 399
400 400 def overrideverify(orig, ui, repo, *pats, **opts):
401 401 large = opts.pop('large', False)
402 402 all = opts.pop('lfa', False)
403 403 contents = opts.pop('lfc', False)
404 404
405 405 result = orig(ui, repo, *pats, **opts)
406 406 if large or all or contents:
407 407 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
408 408 return result
409 409
410 410 def overridedebugstate(orig, ui, repo, *pats, **opts):
411 411 large = opts.pop('large', False)
412 412 if large:
413 413 class fakerepo(object):
414 414 dirstate = lfutil.openlfdirstate(ui, repo)
415 415 orig(ui, fakerepo, *pats, **opts)
416 416 else:
417 417 orig(ui, repo, *pats, **opts)
418 418
419 419 # Before starting the manifest merge, merge.updates will call
420 420 # _checkunknownfile to check if there are any files in the merged-in
421 421 # changeset that collide with unknown files in the working copy.
422 422 #
423 423 # The largefiles are seen as unknown, so this prevents us from merging
424 424 # in a file 'foo' if we already have a largefile with the same name.
425 425 #
426 426 # The overridden function filters the unknown files by removing any
427 427 # largefiles. This makes the merge proceed and we can then handle this
428 428 # case further in the overridden calculateupdates function below.
429 429 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
430 430 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
431 431 return False
432 432 return origfn(repo, wctx, mctx, f, f2)
433 433
434 434 # The manifest merge handles conflicts on the manifest level. We want
435 435 # to handle changes in largefile-ness of files at this level too.
436 436 #
437 437 # The strategy is to run the original calculateupdates and then process
438 438 # the action list it outputs. There are two cases we need to deal with:
439 439 #
440 440 # 1. Normal file in p1, largefile in p2. Here the largefile is
441 441 # detected via its standin file, which will enter the working copy
442 442 # with a "get" action. It is not "merge" since the standin is all
443 443 # Mercurial is concerned with at this level -- the link to the
444 444 # existing normal file is not relevant here.
445 445 #
446 446 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
447 447 # since the largefile will be present in the working copy and
448 448 # different from the normal file in p2. Mercurial therefore
449 449 # triggers a merge action.
450 450 #
451 451 # In both cases, we prompt the user and emit new actions to either
452 452 # remove the standin (if the normal file was kept) or to remove the
453 453 # normal file and get the standin (if the largefile was kept). The
454 454 # default prompt answer is to use the largefile version since it was
455 455 # presumably changed on purpose.
456 456 #
457 457 # Finally, the merge.applyupdates function will then take care of
458 458 # writing the files into the working copy and lfcommands.updatelfiles
459 459 # will update the largefiles.
460 460 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
461 partial, acceptremote, followcopies):
461 acceptremote, followcopies, matcher=None):
462 462 overwrite = force and not branchmerge
463 463 actions, diverge, renamedelete = origfn(
464 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
465 followcopies)
464 repo, p1, p2, pas, branchmerge, force, acceptremote,
465 followcopies, matcher=matcher)
466 466
467 467 if overwrite:
468 468 return actions, diverge, renamedelete
469 469
470 470 # Convert to dictionary with filename as key and action as value.
471 471 lfiles = set()
472 472 for f in actions:
473 473 splitstandin = f and lfutil.splitstandin(f)
474 474 if splitstandin in p1:
475 475 lfiles.add(splitstandin)
476 476 elif lfutil.standin(f) in p1:
477 477 lfiles.add(f)
478 478
479 479 for lfile in lfiles:
480 480 standin = lfutil.standin(lfile)
481 481 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
482 482 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
483 483 if sm in ('g', 'dc') and lm != 'r':
484 484 if sm == 'dc':
485 485 f1, f2, fa, move, anc = sargs
486 486 sargs = (p2[f2].flags(),)
487 487 # Case 1: normal file in the working copy, largefile in
488 488 # the second parent
489 489 usermsg = _('remote turned local normal file %s into a largefile\n'
490 490 'use (l)argefile or keep (n)ormal file?'
491 491 '$$ &Largefile $$ &Normal file') % lfile
492 492 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
493 493 actions[lfile] = ('r', None, 'replaced by standin')
494 494 actions[standin] = ('g', sargs, 'replaces standin')
495 495 else: # keep local normal file
496 496 actions[lfile] = ('k', None, 'replaces standin')
497 497 if branchmerge:
498 498 actions[standin] = ('k', None, 'replaced by non-standin')
499 499 else:
500 500 actions[standin] = ('r', None, 'replaced by non-standin')
501 501 elif lm in ('g', 'dc') and sm != 'r':
502 502 if lm == 'dc':
503 503 f1, f2, fa, move, anc = largs
504 504 largs = (p2[f2].flags(),)
505 505 # Case 2: largefile in the working copy, normal file in
506 506 # the second parent
507 507 usermsg = _('remote turned local largefile %s into a normal file\n'
508 508 'keep (l)argefile or use (n)ormal file?'
509 509 '$$ &Largefile $$ &Normal file') % lfile
510 510 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
511 511 if branchmerge:
512 512 # largefile can be restored from standin safely
513 513 actions[lfile] = ('k', None, 'replaced by standin')
514 514 actions[standin] = ('k', None, 'replaces standin')
515 515 else:
516 516 # "lfile" should be marked as "removed" without
517 517 # removal of itself
518 518 actions[lfile] = ('lfmr', None,
519 519 'forget non-standin largefile')
520 520
521 521 # linear-merge should treat this largefile as 're-added'
522 522 actions[standin] = ('a', None, 'keep standin')
523 523 else: # pick remote normal file
524 524 actions[lfile] = ('g', largs, 'replaces standin')
525 525 actions[standin] = ('r', None, 'replaced by non-standin')
526 526
527 527 return actions, diverge, renamedelete
528 528
def mergerecordupdates(orig, repo, actions, branchmerge):
    # Handle the largefiles-specific 'lfmr' (forget non-standin largefile)
    # action before delegating to the original recordupdates.
    if 'lfmr' in actions:
        lfds = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions['lfmr']:
            # must run before 'orig' so the 'remove' is recorded ahead of
            # every other action
            repo.dirstate.remove(lfile)
            # keep lfile from being synclfdirstate'd as a normal file
            lfds.add(lfile)
        lfds.write()

    return orig(repo, actions, branchmerge)
541 541
542 542
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=None):
    # Anything that is not a standin, or where either side of the merge is
    # absent, is handled by the original filemerge.
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=labels)

    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if ohash != ahash and ohash != dhash:
        # If only the other side changed (dhash == ahash) take it silently;
        # otherwise ask the user which largefile to keep.
        if (dhash == ahash or
            repo.ui.promptchoice(
                _('largefile %s has a merge conflict\nancestor was %s\n'
                  'keep (l)ocal %s or\ntake (o)ther %s?'
                  '$$ &Local $$ &Other') %
                (lfutil.splitstandin(orig), ahash, dhash, ohash),
                0) == 1):
            repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0, False
565 565
def copiespathcopies(orig, ctx1, ctx2, match=None):
    # Translate standin names back to largefile names on both sides of the
    # copy table returned by the original pathcopies.
    split = lfutil.splitstandin
    return dict((split(src) or src, split(dst) or dst)
                for src, dst in orig(ctx1, ctx2, match=match).iteritems())
574 574
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        result = orig(ui, repo, pats, opts, rename)
    except error.Abort as e:
        if str(e) != _('no files to copy'):
            raise e
        else:
            nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()
        def overridematch(ctx, pats=(), opts=None, globbed=False,
                default='relpath', badfn=None):
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if match_.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, ''))
                else:
                    newpats.append(pat)
            match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            m.matchfn = lambda f: (lfutil.isstandin(f) and
                                (f in manifest) and
                                origmatchfn(lfutil.splitstandin(f)) or
                                None)
            return m
        oldmatch = installmatchfn(overridematch)
        listpats = []
        for pat in pats:
            if match_.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        try:
            origcopyfile = util.copyfile
            copiedfiles = []
            def overridecopyfile(src, dest):
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    destlfile = dest.replace(lfutil.shortname, '')
                    if not opts['force'] and os.path.exists(destlfile):
                        raise IOError('',
                            _('destination largefile already exists'))
                copiedfiles.append((src, dest))
                origcopyfile(src, dest)

            util.copyfile = overridecopyfile
            result += orig(ui, repo, listpats, opts, rename)
        finally:
            util.copyfile = origcopyfile

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if (lfutil.shortname in src and
                dest.startswith(repo.wjoin(lfutil.shortname))):
                srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    util.unlinkpath(repo.wjoin(srclfile), True)
                    lfdirstate.remove(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile),
                                  repo.wjoin(destlfile))

                lfdirstate.add(destlfile)
        lfdirstate.write()
    except error.Abort as e:
        if str(e) != _('no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        raise error.Abort(_('no files to copy'))

    return result
712 712
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in s.deleted:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(mctx, pats=(), opts=None, globbed=False,
                default='relpath', badfn=None):
            if opts is None:
                opts = {}
            match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                               False)

            def tostandin(f):
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    return standin
                elif standin in repo[None] or lfdirstate[f] == 'r':
                    return None
                return f
            m._files = [tostandin(f) for f in m._files]
            m._files = [f for f in m._files if f is not None]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                if lfutil.isstandin(f):
                    return (origmatchfn(lfutil.splitstandin(f)) and
                            (f in ctx or f in mctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, ctx, parents, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)

    finally:
        wlock.release()
789 789
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    # only fetch largefiles when something was actually pulled
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull  # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
814 814
def pulledrevsetsymbol(repo, subset, x):
    """``pulled()``
    Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """
    # repo.firstpulled is only set while overridepull is active
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_("pulled() only available in --lfrev"))
    return revset.baseset([r for r in subset if r >= firstpulled])
840 840
def overrideclone(orig, ui, source, dest=None, **opts):
    # --all-largefiles needs a local destination it can cache into.
    d = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise error.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            d)

    return orig(ui, source, dest, **opts)
851 851
def hgclone(orig, ui, opts, *args, **kwargs):
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # If largefiles is required for this repo, permanently enable it locally
        if 'largefiles' in repo.requirements:
            fp = repo.vfs('hgrc', 'a', text=True)
            try:
                fp.write('\n[extensions]\nlargefiles=\n')
            finally:
                fp.close()

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point. The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get('all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            if missing != 0:
                return None

    return result
883 883
def overriderebase(orig, ui, repo, **opts):
    # Repos without largefiles enabled need no special handling.
    if not util.safehasattr(repo, '_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    # Commit without prompting and suppress largefile status output for
    # the duration of the rebase.
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
896 896
def overridearchivecmd(orig, ui, repo, dest, **opts):
    # Mark the unfiltered repo so the archive includes largefiles, and
    # always reset the flag afterwards.
    unfiltered = repo.unfiltered()
    unfiltered.lfstatus = True

    try:
        return orig(ui, unfiltered, dest, **opts)
    finally:
        unfiltered.lfstatus = False
904 904
def hgwebarchive(orig, web, req, tmpl):
    # Make hgweb archives include largefiles rather than standins.
    web.repo.lfstatus = True

    try:
        return orig(web, req, tmpl)
    finally:
        web.repo.lfstatus = False
912 912
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                    prefix='', mtime=None, subrepos=None):
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
                    subrepos)

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise error.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        write('.hg_archival.txt', 0o644, False,
              lambda: archival.buildmetadata(ctx))

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            if node is not None:
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
                        % lfutil.splitstandin(f))
            else:
                path = lfutil.splitstandin(f)

            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            sub._repo.lfstatus = True
            sub.archive(archiver, prefix, submatch)

    archiver.done()
988 988
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
    # Archive a largefiles-enabled hg subrepo, replacing standins with the
    # actual largefile contents fetched from the store/cache.  Falls through
    # to the original archiver when lfstatus is not set.
    if not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match)

    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # Bug fix: test the 'name' parameter instead of closing over the
        # enclosing loop variable 'f' (they were always equal at the existing
        # call sites, but the closure was fragile and misleading).
        if match and not match(name):
            return
        data = getdata()

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
                        % lfutil.splitstandin(f))
            else:
                path = lfutil.splitstandin(f)

            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(os.path.join(prefix, path), 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn

        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = match_.narrowmatcher(subpath, match)
        sub._repo.lfstatus = True
        sub.archive(archiver, prefix + repo._path + '/', submatch)
1043 1043
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo, *args, **kwargs):
    orig(repo, *args, **kwargs)
    # re-check status with largefile awareness turned on
    repo.lfstatus = True
    st = repo.status()
    repo.lfstatus = False
    if st.modified or st.added or st.removed or st.deleted:
        raise error.Abort(_('uncommitted changes'))
1055 1055
def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
    # Forget normal files via the original implementation, then handle the
    # largefiles (and their standins) ourselves.
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
    m = composelargefilematcher(match, repo[None].manifest())

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                repo.wvfs.isdir(lfutil.standin(f)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        rejected = repo[None].forget(standins)
    finally:
        wlock.release()

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1101 1101
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    seen = set()
    lfhashes = set()
    def dedup(fn, lfhash):
        key = (fn, lfhash)
        if key not in seen:
            seen.add(key)
            lfhashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        lfexists = basestore._openstore(repo, other).exists(lfhashes)
        for fn, lfhash in seen:
            if not lfexists[lfhash]:  # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)
1124 1124
def outgoinghook(ui, repo, other, opts, missing):
    # Report which largefiles would be uploaded by the outgoing changesets.
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            # debug mode: also collect and show per-file hashes
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(' %s\n' % (lfhash))
        else:
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')
1156 1156
def summaryremotehook(ui, repo, opts, changes):
    largeopt = opts.get('large', False)
    if changes is None:
        if largeopt:
            return (False, True)  # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))
1185 1185
def overridesummary(orig, ui, repo, *pats, **opts):
    # Report largefiles (not standins) in the summary output.
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
1192 1192
def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
                     similarity=None):
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
                                  False, False, False)

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)
1227 1227
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # hide tracked largefiles from the unknown/ignored lists so purge
        # will not delete them
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    # Restore standins (and prune orphans) after a rollback moves the
    # dirstate parents.
    wlock = repo.wlock()
    try:
        before = repo.dirstate.parents()
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return result
1293 1293
def overridetransplant(orig, ui, repo, *revs, **opts):
    # Commit without prompting and suppress largefile status output for
    # the duration of the transplant.
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1304 1304
def overridecat(orig, ui, repo, file1, *pats, **opts):
    # 'hg cat' for largefiles: widen the matcher to accept largefile names
    # and stream the cached largefile content instead of the standin.
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise error.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
1366 1366
def mergeupdate(orig, repo, node, branchmerge, force,
                *args, **kwargs):
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    wlock = repo.wlock()
    try:
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(match_.always(repo.root,
                                                    repo.getcwd()),
                                      [], False, False, False)
        pctx = repo['.']
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not os.path.exists(lfileabs):
                continue
            lfhash = lfutil.hashrepofile(repo, lfile)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(repo, standin, lfhash,
                                lfutil.getexecutable(lfileabs))
            if (standin in pctx and
                lfhash == lfutil.readstandin(repo, lfile, '.')):
                lfdirstate.normal(lfile)
        for lfile in s.added:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial)

        return result
    finally:
        wlock.release()
1424 1424
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    result = orig(repo, files, *args, **kwargs)

    # Refresh the largefiles behind any touched standins.
    touched = []
    for f in files:
        if lfutil.isstandin(f):
            touched.append(lfutil.splitstandin(f))
    if touched:
        lfcommands.updatelfiles(repo.ui, repo, filelist=touched,
                                printmessage=False, normallookup=True)

    return result
@@ -1,1545 +1,1545 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import shutil
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 bin,
18 18 hex,
19 19 nullhex,
20 20 nullid,
21 21 nullrev,
22 22 )
23 23 from . import (
24 24 copies,
25 25 destutil,
26 26 error,
27 27 filemerge,
28 28 obsolete,
29 29 subrepo,
30 30 util,
31 31 worker,
32 32 )
33 33
# short aliases used by the binary v2 merge-state record format below
_pack = struct.pack
_unpack = struct.unpack
36 36
37 37 def _droponode(data):
38 38 # used for compatibility for v1
39 39 bits = data.split('\0')
40 40 bits = bits[:-2] + bits[-1:]
41 41 return '\0'.join(bits)
42 42
class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    '''
    # relative (to .hg) paths of the two on-disk state files
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False

    def reset(self, node=None, other=None):
        """Throw away any existing state (in memory and on disk) and start a
        fresh merge between `node` (local) and `other` (remote)."""
        self._state = {}
        self._local = None
        self._other = None
        # drop the cached context properties so they are recomputed
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        # remove any leftover backed-up local versions from a prior merge
        shutil.rmtree(self._repo.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._local = None
        self._other = None
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = 's'
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FDC':
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif not rtype.islower():
                # uppercase record types are mandatory (see class docstring)
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        """Return True if every v1 record is reflected in the v2 records
        (i.e. the two files were written by the same merge)."""
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                # first line is the local node, the rest are file entries
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            # a missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == 't':
                    # 't' wraps the real record type; unwrap it
                    rtype, record = record[0], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #  gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def localctx(self):
        if self._local is None:
            raise RuntimeError("localctx accessed but self._local isn't set")
        return self._repo[self._local]

    @util.propertycache
    def otherctx(self):
        if self._other is None:
            # NOTE(review): message copy-pasted from localctx; should read
            # "otherctx accessed but self._other isn't set" -- confirm and fix
            raise RuntimeError("localctx accessed but self._local isn't set")
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
               self._repo.vfs.exists(self.statepathv1) or \
               self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        """Serialize in-memory state into a list of (TYPE, data) records."""
        records = []
        records.append(('L', hex(self._local)))
        records.append(('O', hex(self._other)))
        if self.mergedriver:
            records.append(('m', '\0'.join([
                self.mergedriver, self._mdstate])))
        for d, v in self._state.iteritems():
            if v[0] == 'd':
                # driver-resolved file
                records.append(('D', '\0'.join([d] + v)))
            # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
            # older versions of Mercurial
            elif v[1] == nullhex or v[6] == nullhex:
                records.append(('C', '\0'.join([d] + v)))
            else:
                records.append(('F', '\0'.join([d] + v)))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = irecords.next()
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        whitelist = 'LOF'
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            if key not in whitelist:
                key, data = 't', '%s%s' % (key, data)
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            hash = nullhex
        else:
            hash = util.sha1(fcl.path()).hexdigest()
            self._repo.vfs.write('merge/' + hash, fcl.data())
        # state entry layout matches the on-disk 'F'/'C'/'D' record payload
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        # returns the resolution-state flag, the first field of the state
        # entry (e.g. 'u' as set by add())
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        """Return the (unsorted) list of files tracked by the merge state."""
        return self._state.keys()

    def mark(self, dfile, state):
        """Set the resolution-state flag for `dfile` (e.g. 'r' = resolved)."""
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        """Return the merge driver run state (see class docstring)."""
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'u':
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def _resolve(self, preresolve, dfile, wctx, labels=None):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in 'rd':
            # already resolved or left to the merge driver: nothing to do
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s\n') % afile)
            elif flags == fla:
                # local left flags alone; take the remote side's flags
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                f = self._repo.vfs('merge/' + hash)
                self._repo.wwrite(dfile, f.read(), flags)
                f.close()
            else:
                self._repo.wvfs.unlinkpath(dfile, ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, self._local,
                                                      lfile, fcd, fco, fca,
                                                      labels=labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, self._local,
                                                       lfile, fcd, fco, fca,
                                                       labels=labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

    def _filectxorabsent(self, hexnode, ctx, f):
        """Return ctx[f], or an absentfilectx when hexnode is the null hex
        (the side of the merge has no version of the file)."""
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx, labels=None):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx, labels=labels)

    def resolve(self, dfile, wctx, labels=None):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx, labels=labels)[1]

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in self._results.itervalues():
            if r is None:
                updated += 1
            elif r == 0:
                if action == 'r':
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len([True for f, entry in self._state.iteritems()
                    if entry[0] == 'u'])

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
        for f, (r, action) in self._results.iteritems():
            if action is not None:
                actions[action].append((f, None, "merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'r'

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'a'

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'g'
558 558
559 559 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
560 560 if f2 is None:
561 561 f2 = f
562 562 return (os.path.isfile(repo.wjoin(f))
563 563 and repo.wvfs.audit.check(f)
564 564 and repo.dirstate.normalize(f) not in repo.dirstate
565 565 and mctx[f2].cmp(wctx[f]))
566 566
def _checkunknownfiles(repo, wctx, mctx, force, actions):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    conflicting = []
    if not force:
        for f, (m, args, msg) in actions.iteritems():
            # determine which manifest file to compare against, if any
            other = None
            if m in ('c', 'dc'):
                other = f
            elif m == 'dg':
                other = args[0]
            if other is not None and _checkunknownfile(repo, wctx, mctx,
                                                       f, other):
                conflicting.append(f)

    for f in sorted(conflicting):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if conflicting:
        raise error.Abort(_("untracked files in working directory differ "
                            "from files in requested revision"))

    # downgrade 'create' actions now that we know they are safe
    for f, (m, args, msg) in actions.iteritems():
        if m == 'c':
            actions[f] = ('g', args, msg)
        elif m == 'cm':
            fl2, anc = args
            if _checkunknownfile(repo, wctx, mctx, f):
                actions[f] = ('m', (f, f, None, False, anc),
                              "remote differs from untracked local")
            else:
                actions[f] = ('g', (fl2,), "remote created")
600 600
601 601 def _forgetremoved(wctx, mctx, branchmerge):
602 602 """
603 603 Forget removed files
604 604
605 605 If we're jumping between revisions (as opposed to merging), and if
606 606 neither the working directory nor the target rev has the file,
607 607 then we need to remove it from the dirstate, to prevent the
608 608 dirstate from listing the file when it is no longer in the
609 609 manifest.
610 610
611 611 If we're merging, and the other revision has removed a file
612 612 that is not present in the working directory, we need to mark it
613 613 as removed.
614 614 """
615 615
616 616 actions = {}
617 617 m = 'f'
618 618 if branchmerge:
619 619 m = 'r'
620 620 for f in wctx.deleted():
621 621 if f not in mctx:
622 622 actions[f] = m, None, "forget deleted"
623 623
624 624 if not branchmerge:
625 625 for f in wctx.removed():
626 626 if f not in mctx:
627 627 actions[f] = 'f', None, "forget removed"
628 628
629 629 return actions
630 630
def _checkcollision(repo, wmf, actions):
    """Abort if applying `actions` to working manifest `wmf` would produce
    case-folding collisions between files or between a file and a directory.

    Raises error.Abort on the first collision found; returns nothing.
    """
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in sorted(pmmf):
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
673 673
def driverpreprocess(repo, ms, wctx, labels=None):
    """Run the merge driver's preprocess step, if a driver is configured.

    This is an extension point; the stock implementation does nothing and
    reports success."""
    return True
679 679
def driverconclude(repo, ms, wctx, labels=None):
    """Run the merge driver's conclude step, if a driver is configured.

    This is an extension point; the stock implementation does nothing and
    reports success."""
    return True
685 685
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
                  acceptremote, followcopies):
    """
    Merge p1 and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    partial = function to filter file lists
    acceptremote = accept the incoming changes without prompting
    followcopies = run copy/rename detection and fold the results into the
                   generated actions

    Returns (actions, diverge, renamedelete) where actions maps each file to
    an (action-type, args, message) tuple.
    """

    copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete = ret

    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (bool(branchmerge), bool(force), bool(partial)))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        for s in sorted(wctx.substate):
            if wctx.sub(s).dirty():
                m1['.hgsubstate'] += '+'
                break

    # Compare manifests
    diff = m1.diff(m2)

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if partial and not partial(f):
            continue
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                # not in the ancestor: both sides created it
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k' , (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2,), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1,), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1[20:] == 'a':
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                # remote recreated a file deleted locally
                if acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    return actions, diverge, renamedelete
831 831
832 832 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
833 833 """Resolves false conflicts where the nodeid changed but the content
834 834 remained the same."""
835 835
836 836 for f, (m, args, msg) in actions.items():
837 837 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
838 838 # local did change but ended up with same content
839 839 actions[f] = 'r', None, "prompt same"
840 840 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
841 841 # remote did change but ended up with same content
842 842 del actions[f] # don't get = keep local deleted
843 843
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None):
    """Calculate the actions needed to merge mctx into wctx using ancestors

    `matcher` restricts the merge to matching files; None (or an
    always-matching matcher) means merge everything.

    Returns (actions, diverge, renamedelete) as produced by manifestmerge,
    possibly after a bid-merge auction over multiple ancestors."""
    # translate the matcher into the `partial` filter manifestmerge expects
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = matcher.matchfn

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, partial,
                acceptremote, followcopies)
            _checkunknownfiles(repo, wctx, mctx, force, actions)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            # NOTE(review): this comparison looks inverted relative to the
            # "shortest set" intent above -- it keeps renamedelete1 when the
            # current set is shorter; confirm against upstream
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = bids.items()[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(" %s: consensus for %s\n" % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(" %s: picking 'keep' action\n" % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(" %s: picking 'get' action\n" % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            continue
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        # working directory update: also forget files removed on both sides
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    return actions, diverge, renamedelete
933 937
934 938 def batchremove(repo, actions):
935 939 """apply removes to the working directory
936 940
937 941 yields tuples for progress updates
938 942 """
939 943 verbose = repo.ui.verbose
940 944 unlink = util.unlinkpath
941 945 wjoin = repo.wjoin
942 946 audit = repo.wvfs.audit
943 947 i = 0
944 948 for f, args, msg in actions:
945 949 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
946 950 if verbose:
947 951 repo.ui.note(_("removing %s\n") % f)
948 952 audit(f)
949 953 try:
950 954 unlink(wjoin(f), ignoremissing=True)
951 955 except OSError as inst:
952 956 repo.ui.warn(_("update failed to remove %s: %s!\n") %
953 957 (f, inst.strerror))
954 958 if i == 100:
955 959 yield i, f
956 960 i = 0
957 961 i += 1
958 962 if i > 0:
959 963 yield i, f
960 964
961 965 def batchget(repo, mctx, actions):
962 966 """apply gets to the working directory
963 967
964 968 mctx is the context to get from
965 969
966 970 yields tuples for progress updates
967 971 """
968 972 verbose = repo.ui.verbose
969 973 fctx = mctx.filectx
970 974 wwrite = repo.wwrite
971 975 i = 0
972 976 for f, args, msg in actions:
973 977 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
974 978 if verbose:
975 979 repo.ui.note(_("getting %s\n") % f)
976 980 wwrite(f, fctx(f).data(), args[0])
977 981 if i == 100:
978 982 yield i, f
979 983 i = 0
980 984 i += 1
981 985 if i > 0:
982 986 yield i, f
983 987
984 988 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
985 989 """apply the merge action list to the working directory
986 990
987 991 wctx is the working copy context
988 992 mctx is the context to be merged into the working copy
989 993
990 994 Return a tuple of counts (updated, merged, removed, unresolved) that
991 995 describes how many files were affected by the update.
992 996 """
993 997
994 998 updated, merged, removed = 0, 0, 0
995 999 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node())
996 1000 moves = []
997 1001 for m, l in actions.items():
998 1002 l.sort()
999 1003
1000 1004 # 'cd' and 'dc' actions are treated like other merge conflicts
1001 1005 mergeactions = sorted(actions['cd'])
1002 1006 mergeactions.extend(sorted(actions['dc']))
1003 1007 mergeactions.extend(actions['m'])
1004 1008 for f, args, msg in mergeactions:
1005 1009 f1, f2, fa, move, anc = args
1006 1010 if f == '.hgsubstate': # merged internally
1007 1011 continue
1008 1012 if f1 is None:
1009 1013 fcl = filemerge.absentfilectx(wctx, fa)
1010 1014 else:
1011 1015 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1012 1016 fcl = wctx[f1]
1013 1017 if f2 is None:
1014 1018 fco = filemerge.absentfilectx(mctx, fa)
1015 1019 else:
1016 1020 fco = mctx[f2]
1017 1021 actx = repo[anc]
1018 1022 if fa in actx:
1019 1023 fca = actx[fa]
1020 1024 else:
1021 1025 # TODO: move to absentfilectx
1022 1026 fca = repo.filectx(f1, fileid=nullrev)
1023 1027 ms.add(fcl, fco, fca, f)
1024 1028 if f1 != f and move:
1025 1029 moves.append(f1)
1026 1030
1027 1031 audit = repo.wvfs.audit
1028 1032 _updating = _('updating')
1029 1033 _files = _('files')
1030 1034 progress = repo.ui.progress
1031 1035
1032 1036 # remove renamed files after safely stored
1033 1037 for f in moves:
1034 1038 if os.path.lexists(repo.wjoin(f)):
1035 1039 repo.ui.debug("removing %s\n" % f)
1036 1040 audit(f)
1037 1041 util.unlinkpath(repo.wjoin(f))
1038 1042
1039 1043 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1040 1044
1041 1045 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1042 1046 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
1043 1047
1044 1048 # remove in parallel (must come first)
1045 1049 z = 0
1046 1050 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
1047 1051 for i, item in prog:
1048 1052 z += i
1049 1053 progress(_updating, z, item=item, total=numupdates, unit=_files)
1050 1054 removed = len(actions['r'])
1051 1055
1052 1056 # get in parallel
1053 1057 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
1054 1058 for i, item in prog:
1055 1059 z += i
1056 1060 progress(_updating, z, item=item, total=numupdates, unit=_files)
1057 1061 updated = len(actions['g'])
1058 1062
1059 1063 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1060 1064 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
1061 1065
1062 1066 # forget (manifest only, just log it) (must come first)
1063 1067 for f, args, msg in actions['f']:
1064 1068 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1065 1069 z += 1
1066 1070 progress(_updating, z, item=f, total=numupdates, unit=_files)
1067 1071
1068 1072 # re-add (manifest only, just log it)
1069 1073 for f, args, msg in actions['a']:
1070 1074 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1071 1075 z += 1
1072 1076 progress(_updating, z, item=f, total=numupdates, unit=_files)
1073 1077
1074 1078 # re-add/mark as modified (manifest only, just log it)
1075 1079 for f, args, msg in actions['am']:
1076 1080 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1077 1081 z += 1
1078 1082 progress(_updating, z, item=f, total=numupdates, unit=_files)
1079 1083
1080 1084 # keep (noop, just log it)
1081 1085 for f, args, msg in actions['k']:
1082 1086 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1083 1087 # no progress
1084 1088
1085 1089 # directory rename, move local
1086 1090 for f, args, msg in actions['dm']:
1087 1091 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1088 1092 z += 1
1089 1093 progress(_updating, z, item=f, total=numupdates, unit=_files)
1090 1094 f0, flags = args
1091 1095 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1092 1096 audit(f)
1093 1097 repo.wwrite(f, wctx.filectx(f0).data(), flags)
1094 1098 util.unlinkpath(repo.wjoin(f0))
1095 1099 updated += 1
1096 1100
1097 1101 # local directory rename, get
1098 1102 for f, args, msg in actions['dg']:
1099 1103 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1100 1104 z += 1
1101 1105 progress(_updating, z, item=f, total=numupdates, unit=_files)
1102 1106 f0, flags = args
1103 1107 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1104 1108 repo.wwrite(f, mctx.filectx(f0).data(), flags)
1105 1109 updated += 1
1106 1110
1107 1111 # exec
1108 1112 for f, args, msg in actions['e']:
1109 1113 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1110 1114 z += 1
1111 1115 progress(_updating, z, item=f, total=numupdates, unit=_files)
1112 1116 flags, = args
1113 1117 audit(f)
1114 1118 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
1115 1119 updated += 1
1116 1120
1117 1121 # the ordering is important here -- ms.mergedriver will raise if the merge
1118 1122 # driver has changed, and we want to be able to bypass it when overwrite is
1119 1123 # True
1120 1124 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1121 1125
1122 1126 if usemergedriver:
1123 1127 ms.commit()
1124 1128 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1125 1129 # the driver might leave some files unresolved
1126 1130 unresolvedf = set(ms.unresolved())
1127 1131 if not proceed:
1128 1132 # XXX setting unresolved to at least 1 is a hack to make sure we
1129 1133 # error out
1130 1134 return updated, merged, removed, max(len(unresolvedf), 1)
1131 1135 newactions = []
1132 1136 for f, args, msg in mergeactions:
1133 1137 if f in unresolvedf:
1134 1138 newactions.append((f, args, msg))
1135 1139 mergeactions = newactions
1136 1140
1137 1141 # premerge
1138 1142 tocomplete = []
1139 1143 for f, args, msg in mergeactions:
1140 1144 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1141 1145 z += 1
1142 1146 progress(_updating, z, item=f, total=numupdates, unit=_files)
1143 1147 if f == '.hgsubstate': # subrepo states need updating
1144 1148 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1145 1149 overwrite)
1146 1150 continue
1147 1151 audit(f)
1148 1152 complete, r = ms.preresolve(f, wctx, labels=labels)
1149 1153 if not complete:
1150 1154 numupdates += 1
1151 1155 tocomplete.append((f, args, msg))
1152 1156
1153 1157 # merge
1154 1158 for f, args, msg in tocomplete:
1155 1159 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1156 1160 z += 1
1157 1161 progress(_updating, z, item=f, total=numupdates, unit=_files)
1158 1162 ms.resolve(f, wctx, labels=labels)
1159 1163
1160 1164 ms.commit()
1161 1165
1162 1166 unresolved = ms.unresolvedcount()
1163 1167
1164 1168 if usemergedriver and not unresolved and ms.mdstate() != 's':
1165 1169 if not driverconclude(repo, ms, wctx, labels=labels):
1166 1170 # XXX setting unresolved to at least 1 is a hack to make sure we
1167 1171 # error out
1168 1172 unresolved = max(unresolved, 1)
1169 1173
1170 1174 ms.commit()
1171 1175
1172 1176 msupdated, msmerged, msremoved = ms.counts()
1173 1177 updated += msupdated
1174 1178 merged += msmerged
1175 1179 removed += msremoved
1176 1180
1177 1181 extraactions = ms.actions()
1178 1182 for k, acts in extraactions.iteritems():
1179 1183 actions[k].extend(acts)
1180 1184
1181 1185 progress(_updating, None, total=numupdates, unit=_files)
1182 1186
1183 1187 return updated, merged, removed, unresolved
1184 1188
1185 1189 def recordupdates(repo, actions, branchmerge):
1186 1190 "record merge actions to the dirstate"
1187 1191 # remove (must come first)
1188 1192 for f, args, msg in actions.get('r', []):
1189 1193 if branchmerge:
1190 1194 repo.dirstate.remove(f)
1191 1195 else:
1192 1196 repo.dirstate.drop(f)
1193 1197
1194 1198 # forget (must come first)
1195 1199 for f, args, msg in actions.get('f', []):
1196 1200 repo.dirstate.drop(f)
1197 1201
1198 1202 # re-add
1199 1203 for f, args, msg in actions.get('a', []):
1200 1204 repo.dirstate.add(f)
1201 1205
1202 1206 # re-add/mark as modified
1203 1207 for f, args, msg in actions.get('am', []):
1204 1208 if branchmerge:
1205 1209 repo.dirstate.normallookup(f)
1206 1210 else:
1207 1211 repo.dirstate.add(f)
1208 1212
1209 1213 # exec change
1210 1214 for f, args, msg in actions.get('e', []):
1211 1215 repo.dirstate.normallookup(f)
1212 1216
1213 1217 # keep
1214 1218 for f, args, msg in actions.get('k', []):
1215 1219 pass
1216 1220
1217 1221 # get
1218 1222 for f, args, msg in actions.get('g', []):
1219 1223 if branchmerge:
1220 1224 repo.dirstate.otherparent(f)
1221 1225 else:
1222 1226 repo.dirstate.normal(f)
1223 1227
1224 1228 # merge
1225 1229 for f, args, msg in actions.get('m', []):
1226 1230 f1, f2, fa, move, anc = args
1227 1231 if branchmerge:
1228 1232 # We've done a branch merge, mark this file as merged
1229 1233 # so that we properly record the merger later
1230 1234 repo.dirstate.merge(f)
1231 1235 if f1 != f2: # copy/rename
1232 1236 if move:
1233 1237 repo.dirstate.remove(f1)
1234 1238 if f1 != f:
1235 1239 repo.dirstate.copy(f1, f)
1236 1240 else:
1237 1241 repo.dirstate.copy(f2, f)
1238 1242 else:
1239 1243 # We've update-merged a locally modified file, so
1240 1244 # we set the dirstate to emulate a normal checkout
1241 1245 # of that file some time in the past. Thus our
1242 1246 # merge will appear as a normal local file
1243 1247 # modification.
1244 1248 if f2 == f: # file not locally copied/moved
1245 1249 repo.dirstate.normallookup(f)
1246 1250 if move:
1247 1251 repo.dirstate.drop(f1)
1248 1252
1249 1253 # directory rename, move local
1250 1254 for f, args, msg in actions.get('dm', []):
1251 1255 f0, flag = args
1252 1256 if branchmerge:
1253 1257 repo.dirstate.add(f)
1254 1258 repo.dirstate.remove(f0)
1255 1259 repo.dirstate.copy(f0, f)
1256 1260 else:
1257 1261 repo.dirstate.normal(f)
1258 1262 repo.dirstate.drop(f0)
1259 1263
1260 1264 # directory rename, get
1261 1265 for f, args, msg in actions.get('dg', []):
1262 1266 f0, flag = args
1263 1267 if branchmerge:
1264 1268 repo.dirstate.add(f)
1265 1269 repo.dirstate.copy(f0, f)
1266 1270 else:
1267 1271 repo.dirstate.normal(f)
1268 1272
1269 1273 def update(repo, node, branchmerge, force, ancestor=None,
1270 1274 mergeancestor=False, labels=None, matcher=None):
1271 1275 """
1272 1276 Perform a merge between the working directory and the given node
1273 1277
1274 1278 node = the node to update to, or None if unspecified
1275 1279 branchmerge = whether to merge between branches
1276 1280 force = whether to force branch merging or file overwriting
1277 1281 matcher = a matcher to filter file lists (dirstate not updated)
1278 1282 mergeancestor = whether it is merging with an ancestor. If true,
1279 1283 we should accept the incoming changes for any prompts that occur.
1280 1284 If false, merging with an ancestor (fast-forward) is only allowed
1281 1285 between different named branches. This flag is used by rebase extension
1282 1286 as a temporary fix and should be avoided in general.
1283 1287
1284 1288 The table below shows all the behaviors of the update command
1285 1289 given the -c and -C or no options, whether the working directory
1286 1290 is dirty, whether a revision is specified, and the relationship of
1287 1291 the parent rev to the target rev (linear, on the same named
1288 1292 branch, or on another named branch).
1289 1293
1290 1294 This logic is tested by test-update-branches.t.
1291 1295
1292 1296 -c -C dirty rev | linear same cross
1293 1297 n n n n | ok (1) x
1294 1298 n n n y | ok ok ok
1295 1299 n n y n | merge (2) (2)
1296 1300 n n y y | merge (3) (3)
1297 1301 n y * * | discard discard discard
1298 1302 y n y * | (4) (4) (4)
1299 1303 y n n * | ok ok ok
1300 1304 y y * * | (5) (5) (5)
1301 1305
1302 1306 x = can't happen
1303 1307 * = don't-care
1304 1308 1 = abort: not a linear update (merge or update --check to force update)
1305 1309 2 = abort: uncommitted changes (commit and merge, or update --clean to
1306 1310 discard changes)
1307 1311 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1308 1312 4 = abort: uncommitted changes (checked in commands.py)
1309 1313 5 = incompatible options (checked in commands.py)
1310 1314
1311 1315 Return the same tuple as applyupdates().
1312 1316 """
1313 1317
1314 1318 onode = node
1315 1319 wlock = repo.wlock()
1316 1320 # If we're doing a partial update, we need to skip updating
1317 1321 # the dirstate, so make a note of any partial-ness to the
1318 1322 # update here.
1319 1323 if matcher is None or matcher.always():
1320 1324 partial = False
1321 1325 else:
1322 1326 partial = True
1323 1327 try:
1324 1328 wc = repo[None]
1325 1329 pl = wc.parents()
1326 1330 p1 = pl[0]
1327 1331 pas = [None]
1328 1332 if ancestor is not None:
1329 1333 pas = [repo[ancestor]]
1330 1334
1331 1335 if node is None:
1332 1336 if (repo.ui.configbool('devel', 'all-warnings')
1333 1337 or repo.ui.configbool('devel', 'oldapi')):
1334 1338 repo.ui.develwarn('update with no target')
1335 1339 rev, _mark, _act = destutil.destupdate(repo)
1336 1340 node = repo[rev].node()
1337 1341
1338 1342 overwrite = force and not branchmerge
1339 1343
1340 1344 p2 = repo[node]
1341 1345 if pas[0] is None:
1342 1346 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1343 1347 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1344 1348 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1345 1349 else:
1346 1350 pas = [p1.ancestor(p2, warn=branchmerge)]
1347 1351
1348 1352 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1349 1353
1350 1354 ### check phase
1351 1355 if not overwrite:
1352 1356 if len(pl) > 1:
1353 1357 raise error.Abort(_("outstanding uncommitted merge"))
1354 1358 ms = mergestate.read(repo)
1355 1359 if list(ms.unresolved()):
1356 1360 raise error.Abort(_("outstanding merge conflicts"))
1357 1361 if branchmerge:
1358 1362 if pas == [p2]:
1359 1363 raise error.Abort(_("merging with a working directory ancestor"
1360 1364 " has no effect"))
1361 1365 elif pas == [p1]:
1362 1366 if not mergeancestor and p1.branch() == p2.branch():
1363 1367 raise error.Abort(_("nothing to merge"),
1364 1368 hint=_("use 'hg update' "
1365 1369 "or check 'hg heads'"))
1366 1370 if not force and (wc.files() or wc.deleted()):
1367 1371 raise error.Abort(_("uncommitted changes"),
1368 1372 hint=_("use 'hg status' to list changes"))
1369 1373 for s in sorted(wc.substate):
1370 1374 wc.sub(s).bailifchanged()
1371 1375
1372 1376 elif not overwrite:
1373 1377 if p1 == p2: # no-op update
1374 1378 # call the hooks and exit early
1375 1379 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1376 1380 repo.hook('update', parent1=xp2, parent2='', error=0)
1377 1381 return 0, 0, 0, 0
1378 1382
1379 1383 if pas not in ([p1], [p2]): # nonlinear
1380 1384 dirty = wc.dirty(missing=True)
1381 1385 if dirty or onode is None:
1382 1386 # Branching is a bit strange to ensure we do the minimal
1383 1387 # amount of call to obsolete.background.
1384 1388 foreground = obsolete.foreground(repo, [p1.node()])
1385 1389 # note: the <node> variable contains a random identifier
1386 1390 if repo[node].node() in foreground:
1387 1391 pas = [p1] # allow updating to successors
1388 1392 elif dirty:
1389 1393 msg = _("uncommitted changes")
1390 1394 if onode is None:
1391 1395 hint = _("commit and merge, or update --clean to"
1392 1396 " discard changes")
1393 1397 else:
1394 1398 hint = _("commit or update --clean to discard"
1395 1399 " changes")
1396 1400 raise error.Abort(msg, hint=hint)
1397 1401 else: # node is none
1398 1402 msg = _("not a linear update")
1399 1403 hint = _("merge or update --check to force update")
1400 1404 raise error.Abort(msg, hint=hint)
1401 1405 else:
1402 1406 # Allow jumping branches if clean and specific rev given
1403 1407 pas = [p1]
1404 1408
1405 1409 # deprecated config: merge.followcopies
1406 1410 followcopies = False
1407 1411 if overwrite:
1408 1412 pas = [wc]
1409 1413 elif pas == [p2]: # backwards
1410 1414 pas = [wc.p1()]
1411 1415 elif not branchmerge and not wc.dirty(missing=True):
1412 1416 pass
1413 1417 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1414 1418 followcopies = True
1415 1419
1416 1420 ### calculate phase
1417 if matcher is None or matcher.always():
1418 partial = False
1419 else:
1420 partial = matcher.matchfn
1421 1421 actionbyfile, diverge, renamedelete = calculateupdates(
1422 repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
1423 followcopies)
1422 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1423 followcopies, matcher=matcher)
1424 1424 # Convert to dictionary-of-lists format
1425 1425 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1426 1426 for f, (m, args, msg) in actionbyfile.iteritems():
1427 1427 if m not in actions:
1428 1428 actions[m] = []
1429 1429 actions[m].append((f, args, msg))
1430 1430
1431 1431 if not util.checkcase(repo.path):
1432 1432 # check collision between files only in p2 for clean update
1433 1433 if (not branchmerge and
1434 1434 (force or not wc.dirty(missing=True, branch=False))):
1435 1435 _checkcollision(repo, p2.manifest(), None)
1436 1436 else:
1437 1437 _checkcollision(repo, wc.manifest(), actions)
1438 1438
1439 1439 # Prompt and create actions. Most of this is in the resolve phase
1440 1440 # already, but we can't handle .hgsubstate in filemerge or
1441 1441 # subrepo.submerge yet so we have to keep prompting for it.
1442 1442 for f, args, msg in sorted(actions['cd']):
1443 1443 if f != '.hgsubstate':
1444 1444 continue
1445 1445 if repo.ui.promptchoice(
1446 1446 _("local changed %s which remote deleted\n"
1447 1447 "use (c)hanged version or (d)elete?"
1448 1448 "$$ &Changed $$ &Delete") % f, 0):
1449 1449 actions['r'].append((f, None, "prompt delete"))
1450 1450 elif f in p1:
1451 1451 actions['am'].append((f, None, "prompt keep"))
1452 1452 else:
1453 1453 actions['a'].append((f, None, "prompt keep"))
1454 1454
1455 1455 for f, args, msg in sorted(actions['dc']):
1456 1456 if f != '.hgsubstate':
1457 1457 continue
1458 1458 f1, f2, fa, move, anc = args
1459 1459 flags = p2[f2].flags()
1460 1460 if repo.ui.promptchoice(
1461 1461 _("remote changed %s which local deleted\n"
1462 1462 "use (c)hanged version or leave (d)eleted?"
1463 1463 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1464 1464 actions['g'].append((f, (flags,), "prompt recreating"))
1465 1465
1466 1466 # divergent renames
1467 1467 for f, fl in sorted(diverge.iteritems()):
1468 1468 repo.ui.warn(_("note: possible conflict - %s was renamed "
1469 1469 "multiple times to:\n") % f)
1470 1470 for nf in fl:
1471 1471 repo.ui.warn(" %s\n" % nf)
1472 1472
1473 1473 # rename and delete
1474 1474 for f, fl in sorted(renamedelete.iteritems()):
1475 1475 repo.ui.warn(_("note: possible conflict - %s was deleted "
1476 1476 "and renamed to:\n") % f)
1477 1477 for nf in fl:
1478 1478 repo.ui.warn(" %s\n" % nf)
1479 1479
1480 1480 ### apply phase
1481 1481 if not branchmerge: # just jump to the new rev
1482 1482 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1483 1483 if not partial:
1484 1484 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1485 1485 # note that we're in the middle of an update
1486 1486 repo.vfs.write('updatestate', p2.hex())
1487 1487
1488 1488 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1489 1489
1490 1490 if not partial:
1491 1491 repo.dirstate.beginparentchange()
1492 1492 repo.setparents(fp1, fp2)
1493 1493 recordupdates(repo, actions, branchmerge)
1494 1494 # update completed, clear state
1495 1495 util.unlink(repo.join('updatestate'))
1496 1496
1497 1497 if not branchmerge:
1498 1498 repo.dirstate.setbranch(p2.branch())
1499 1499 repo.dirstate.endparentchange()
1500 1500 finally:
1501 1501 wlock.release()
1502 1502
1503 1503 if not partial:
1504 1504 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1505 1505 return stats
1506 1506
1507 1507 def graft(repo, ctx, pctx, labels, keepparent=False):
1508 1508 """Do a graft-like merge.
1509 1509
1510 1510 This is a merge where the merge ancestor is chosen such that one
1511 1511 or more changesets are grafted onto the current changeset. In
1512 1512 addition to the merge, this fixes up the dirstate to include only
1513 1513 a single parent (if keepparent is False) and tries to duplicate any
1514 1514 renames/copies appropriately.
1515 1515
1516 1516 ctx - changeset to rebase
1517 1517 pctx - merge base, usually ctx.p1()
1518 1518 labels - merge labels eg ['local', 'graft']
1519 1519 keepparent - keep second parent if any
1520 1520
1521 1521 """
1522 1522 # If we're grafting a descendant onto an ancestor, be sure to pass
1523 1523 # mergeancestor=True to update. This does two things: 1) allows the merge if
1524 1524 # the destination is the same as the parent of the ctx (so we can use graft
1525 1525 # to copy commits), and 2) informs update that the incoming changes are
1526 1526 # newer than the destination so it doesn't prompt about "remote changed foo
1527 1527 # which local deleted".
1528 1528 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1529 1529
1530 1530 stats = update(repo, ctx.node(), True, True, pctx.node(),
1531 1531 mergeancestor=mergeancestor, labels=labels)
1532 1532
1533 1533 pother = nullid
1534 1534 parents = ctx.parents()
1535 1535 if keepparent and len(parents) == 2 and pctx in parents:
1536 1536 parents.remove(pctx)
1537 1537 pother = parents[0].node()
1538 1538
1539 1539 repo.dirstate.beginparentchange()
1540 1540 repo.setparents(repo['.'].node(), pother)
1541 1541 repo.dirstate.write(repo.currenttransaction())
1542 1542 # fix up dirstate for copies and renames
1543 1543 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1544 1544 repo.dirstate.endparentchange()
1545 1545 return stats
General Comments 0
You need to be logged in to leave comments. Login now