##// END OF EJS Templates
repair: use `first` instead of direct indexing...
Pierre-Yves David -
r22818:d7b11449 default
parent child Browse files
Show More
@@ -1,669 +1,676
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from i18n import _
10 10 from lock import release
11 11 from node import nullid
12 12 import localrepo, bundlerepo, unionrepo, httppeer, sshpeer, statichttprepo
13 13 import bookmarks, lock, util, extensions, error, node, scmutil, phases, url
14 14 import cmdutil, discovery, repoview, exchange
15 15 import merge as mergemod
16 16 import verify as verifymod
17 17 import errno, os, shutil
18 18
def _local(path):
    """Return the repository module for a local path.

    A path that is a plain file is treated as a bundle; anything else
    is assumed to be a regular local repository.
    """
    localpath = util.expandpath(util.urllocalpath(path))
    if os.path.isfile(localpath):
        return bundlerepo
    return localrepo
22 22
def addbranchrevs(lrepo, other, branches, revs):
    """Expand branch names in `branches` into revisions, returning
    (revs, checkoutrev).

    `branches` is a (hashbranch, branches) pair as returned by parseurl().
    `revs` may be a list or a lazy revset-like object; if it provides a
    `first()` method that is preferred over direct indexing, because lazy
    revsets cannot be indexed cheaply.
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to expand: pass revs through, picking the first rev
        # (via first() when available) as the checkout target
        x = revs or None
        if util.safehasattr(revs, 'first'):
            y = revs.first()
        elif revs:
            y = revs[0]
        else:
            y = None
        return x, y
    revs = revs and list(revs) or []
    if not peer.capable('branchmap'):
        # old remote without branchmap: only the fragment branch can be
        # resolved, and only by sending its name verbatim
        if branches:
            raise util.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        # resolve one branch name against the remote branchmap; '.' means
        # the local dirstate branch
        if branch == '.':
            if not lrepo:
                raise util.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        # the fragment may be a branch name or a raw hash/rev; fall back
        # to passing it through when it is not a known branch
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
54 61
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = util.url(path)
    if u.fragment:
        # split off the #branch fragment; it names the branch to pull
        branch, u.fragment = u.fragment, None
    else:
        branch = None
    return str(u), (branch, branches or [])
64 71
# Map of URL scheme -> handler, consumed by _peerlookup().  A value is
# either a module implementing the repo/peer API, or a callable (such as
# _local) that selects the module for a given path.
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
74 81
def _peerlookup(path):
    """Return the repo/peer module handling *path*'s URL scheme.

    Unknown or missing schemes fall back to the 'file' handler.
    """
    scheme = util.url(path).scheme or 'file'
    handler = schemes.get(scheme) or schemes['file']
    # entries are modules or callables; a callable picks the module
    # from the concrete path, a module raises TypeError when "called"
    try:
        return handler(path)
    except TypeError:
        return handler
83 90
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, str):
        # already a repo/peer object: ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # scheme handler has no islocal() -> remote-only scheme
        return False
92 99
def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    u = util.url(path, parsequery=False, parsefragment=False)
    if not u.islocal():
        return url.open(ui, path)
    return util.posixfile(u.localpath(), 'rb')
100 107
# a list of (ui, repo) functions called for wire peer initialization
# (run by _peerorrepo() on objects that are not local repositories)
wirepeersetupfuncs = []
103 110
def _peerorrepo(ui, path, create=False):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    # the instantiated object may carry its own (repo-level) ui; prefer it
    ui = getattr(obj, "ui", ui)
    # give every loaded extension a chance to wrap/augment the new object
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    # wire peers (non-local objects) get additional registered setup
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
116 123
def repository(ui, path='', create=False):
    """return a repository object for the specified path"""
    obj = _peerorrepo(ui, path, create)
    localrepo_ = obj.local()
    if not localrepo_:
        raise util.Abort(_("repository '%s' is not local") %
                         (path or obj.url()))
    # hand out the 'visible' view so hidden changesets stay hidden
    return localrepo_.filtered('visible')
125 132
def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    remote_ui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(remote_ui, path, create)
    return obj.peer()
130 137
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest('foo')
    'foo'
    >>> defaultdest('/foo/bar')
    'bar'
    >>> defaultdest('/')
    ''
    >>> defaultdest('')
    ''
    >>> defaultdest('http://example.org/')
    ''
    >>> defaultdest('http://example.org/foo/')
    'foo'
    '''
    urlpath = util.url(source).path
    if urlpath:
        # normpath strips a trailing slash so basename gives the last
        # real path component
        return os.path.basename(os.path.normpath(urlpath))
    return ''
151 158
def share(ui, source, dest=None, update=True):
    '''create a shared repository

    `source` is a local repo (object or path); `dest` defaults to the
    source's base name.  The new repo stores only 'requires' and
    'sharedpath' and reuses the source's store.  When `update` is true
    the working directory is checked out (a non-True value names the
    revision to check out).
    '''

    if not islocal(source):
        raise util.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = scmutil.vfs(dest, realpath=True)
    destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise util.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    # a missing 'requires' file just means an old-format source repo
    requirements = ''
    try:
        requirements = srcrepo.opener.read('requires')
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise

    requirements += 'shared\n'
    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)

    # propagate the source's default path so pull/push work as expected
    default = srcrepo.ui.config('paths', 'default')
    if default:
        fp = r.opener("hgrc", "w", text=True)
        fp.write("[paths]\n")
        fp.write("default = %s\n" % default)
        fp.close()

    if update:
        r.ui.status(_("updating working directory\n"))
        if update is not True:
            checkout = update
        # try the requested checkout first, then 'default', then 'tip'
        for test in (checkout, 'default', 'tip'):
            if test is None:
                continue
            try:
                uprev = r.lookup(test)
                break
            except error.RepoLookupError:
                continue
        _update(r, uprev)
218 225
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        num = 0
        srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
        srcvfs = scmutil.vfs(srcrepo.sharedpath)
        dstvfs = scmutil.vfs(destpath)
        for f in srcrepo.store.copylist():
            # a publishing repo has no secret changesets, so phase data
            # need not be copied
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                # copyfiles reuses the previous hardlink decision so all
                # files are linked or all are copied
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
        else:
            ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        # caller never sees destlock on failure, so release it here
        release(destlock)
        raise
254 261
def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
          update=True, stream=False, branch=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone
    """

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branch = (None, branch or [])
        origsource = source = srcpeer.url()
    rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise util.Abort(_("empty destination path is not valid"))

    destvfs = scmutil.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise util.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise util.Abort(_("destination '%s' is not empty") % dest)

    # cleandir, when set, is removed on failure (see the finally block)
    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        # a hardlink/copy clone is only possible for a local, secret-free
        # source when neither --pull nor --rev forces a real pull
        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise util.Abort(_("destination '%s' already exists")
                                     % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            # Recomputing branch cache might be slow on big repos,
            # so just copy it
            def copybranchcache(fname):
                srcbranchcache = srcrepo.join('cache/%s' % fname)
                dstbranchcache = os.path.join(dstcachedir, fname)
                if os.path.exists(srcbranchcache):
                    if not os.path.exists(dstcachedir):
                        os.mkdir(dstcachedir)
                    util.copyfile(srcbranchcache, dstbranchcache)

            dstcachedir = os.path.join(destpath, 'cache')
            # In local clones we're copying all nodes, not just served
            # ones. Therefore copy all branchcaches over.
            copybranchcache('branch2')
            for cachename in repoview.filtertable:
                copybranchcache('branch2-%s' % cachename)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                # only pass ui when no srcrepo
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise util.Abort(_("destination '%s' already exists")
                                     % dest)
                raise

            revs = None
            if rev:
                if not srcpeer.capable('lookup'):
                    raise util.Abort(_("src repository does not support "
                                       "revision lookup and so doesn't "
                                       "support clone by revision"))
                revs = [srcpeer.lookup(r) for r in rev]
                checkout = revs[0]
            if destpeer.local():
                destpeer.local().clone(srcpeer, heads=revs, stream=stream)
            elif srcrepo:
                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise util.Abort(_("clone from remote to remote not supported"))

        # clone succeeded; nothing to clean up any more
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = (
                '# You may want to set your username here if it is not set\n'
                "# globally, or this repository requires a different\n"
                '# username from your usual configuration. If you want to\n'
                '# set something for all of your repositories on this\n'
                '# computer, try running the command\n'
                "# 'hg config --edit --global'\n"
                '# [ui]\n'
                '# username = Jane Doe <jdoe@example.com>\n'
                '[paths]\n'
                'default = %s\n'
                )
            fp = destrepo.opener("hgrc", "w", text=True)
            u = util.url(abspath)
            u.passwd = None   # never record a password in hgrc
            defaulturl = str(u)
            fp.write(template % defaulturl)
            fp.close()

            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        pass
                if uprev is None:
                    # no explicit checkout: prefer the '@' bookmark, then
                    # the default branch tip, then tip
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.setcurrent(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            # failure before completion: remove the partial destination
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
485 492
def _showstats(repo, stats):
    """Print a one-line summary of merge/update *stats* on repo's ui."""
    msg = _("%d files updated, %d files merged, "
            "%d files removed, %d files unresolved\n")
    repo.ui.status(msg % stats)
489 496
def updaterepo(repo, node, overwrite):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    labels = ['working copy', 'destination']
    return mergemod.update(repo, node, False, overwrite, None, labels=labels)
498 505
def update(repo, node):
    """update the working directory to node, merging linear changes"""
    stats = updaterepo(repo, node, False)
    _showstats(repo, stats)
    unresolved = stats[3]
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
506 513
# naming conflict in clone(): its `update` parameter shadows the
# module-level update() function, so keep this alias for internal callers
_update = update
509 516
def clean(repo, node, show_stats=True):
    """forcibly switch the working directory to node, clobbering changes"""
    result = updaterepo(repo, node, True)
    # an interrupted graft no longer applies after a forced update
    util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
    if show_stats:
        _showstats(repo, result)
    return result[3] > 0
517 524
def merge(repo, node, force=None, remind=True):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    stats = mergemod.update(repo, node, True, force, False)
    _showstats(repo, stats)
    unresolved = stats[3] > 0
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C .' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return unresolved
529 536
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # getremotechanges may substitute a bundle-backed repo for `other`
    # and hands back a cleanup callback that must always run
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()

        displayer = cmdutil.show_changeset(ui, other, opts, buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
559 566
def incoming(ui, repo, source, opts):
    """Show changesets in *source* not present in *repo*; returns an
    exit code (0 when incoming changes were found)."""
    def subreporecurse():
        # recurse into subrepos when --subrepos was given; result is 0
        # if any subrepo reported incoming changes
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # non-graphical changeset listing, honoring --limit,
        # --newest-first and --no-merges
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)
584 591
def _outgoing(ui, repo, dest, opts):
    """Compute changesets in *repo* missing from *dest*.

    Returns (missing, otherpeer); prints "no changes found" when the
    missing list is empty.
    """
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = parseurl(dest, opts.get('branch'))
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    # discovery runs on the unfiltered repo so hidden ancestors count
    outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other
600 607
def outgoing(ui, repo, dest, opts):
    """Show changesets in *repo* not present in *dest*; returns an exit
    code (0 when outgoing changes were found)."""
    def recurse():
        # recurse into subrepos when --subrepos was given
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        # run outgoing hooks even with an empty set before recursing
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes
633 640
def revert(repo, node, choose):
    """revert changes to revision in node without updating dirstate"""
    stats = mergemod.update(repo, node, False, True, choose)
    # stats[3] counts unresolved files
    return stats[3] > 0
637 644
def verify(repo):
    """verify the consistency of a repository

    Thin wrapper delegating to the verify module; returns its result.
    """
    return verifymod.verify(repo)
641 648
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    # a repository exposes both baseui and a repo-local ui
    if util.safehasattr(src, 'baseui'):
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui             # but copy target options from the repo
    else:
        dst = src.copy()         # plain ui object: keep all global options

    # ssh-specific options: command-line opts win over config
    for key in ('ssh', 'remotecmd'):
        value = opts.get(key) or src.config('ui', key)
        if value:
            dst.setconfig("ui", key, value, 'copied')

    # bundle-specific options
    root = src.config('bundle', 'mainreporoot')
    if root:
        dst.setconfig('bundle', 'mainreporoot', root, 'copied')

    # copy selected local settings to the remote ui
    for section in ('auth', 'hostfingerprints', 'http_proxy'):
        for name, value in src.configitems(section):
            dst.setconfig(section, name, value, 'copied')
    cacerts = src.config('web', 'cacerts')
    if cacerts:
        dst.setconfig('web', 'cacerts', util.expandpath(cacerts), 'copied')

    return dst
@@ -1,189 +1,189
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from mercurial import changegroup, exchange
10 10 from mercurial.node import short
11 11 from mercurial.i18n import _
12 12 import errno
13 13
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip')
    vfs = repo.vfs
    backupdir = "strip-backup"
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)
    name = "%s/%s-%s.hg" % (backupdir, short(node), suffix)
    # uncompressed bundles are used for the temporary partial backup
    bundletype = "HG10BZ" if compress else "HG10UN"
    return changegroup.writebundle(cg, name, bundletype, vfs)
27 27
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    files = set()
    for rev in xrange(striprev, len(repo)):
        files |= set(repo[rev].files())
    return sorted(files)
36 36
37 37 def _collectbrokencsets(repo, files, striprev):
38 38 """return the changesets which will be broken by the truncation"""
39 39 s = set()
40 40 def collectone(revlog):
41 41 _, brokenset = revlog.getstrippoint(striprev)
42 42 s.update([revlog.linkrev(r) for r in brokenset])
43 43
44 44 collectone(repo.manifest)
45 45 for fname in files:
46 46 collectone(repo.file(fname))
47 47
48 48 return s
49 49
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Remove the changesets in *nodelist* and all their descendants.

    Revisions above the strip point that are NOT descendants of the
    stripped set are bundled and re-added afterwards.  `backup` controls
    whether a full backup bundle is written first ('none'/'strip'
    disable it for backwards compatibility).
    """

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    # NOTE(review): Python 2 list comprehensions leak the loop variable,
    # so `node` below ends up bound to the last element of nodelist and
    # is reused in the _bundle() calls further down — confirm intended.
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        # revs() returns a lazy smartset; use first() instead of indexing
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # bookmarks pointing into the stripped set must be moved afterwards
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=False)

    mfst = repo.manifest

    tr = repo.transaction("strip")
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            # truncate the revlogs to the lengths recorded when the
            # transaction started
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.sopener(file, 'a').truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)
            tr.close()
        except: # re-raises
            tr.abort()
            raise

        if saveheads or savebases:
            # re-apply the saved (non-descendant) revisions
            ui.note(_("adding branch\n"))
            f = vfs.open(chgrpfile, "rb")
            gen = exchange.readbundle(ui, f, chgrpfile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            changegroup.addchangegroup(repo, gen, 'strip',
                                       'bundle:' + vfs.join(chgrpfile), True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError, e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        bm.write()
    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % vfs.join(chgrpfile))
        raise
    else:
        if saveheads or savebases:
            # Remove partial backup only if there were no exceptions
            vfs.unlink(chgrpfile)

    repo.destroyed()
General Comments 0
You need to be logged in to leave comments. Login now