branchcache: fetch source branchcache during clone (issue3378)...
Tomasz Kleczek
r17740:e6067bec default
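The hunk below (new lines 339-348 of hg.py) is the substance of the fix: when a clone copies the store directly, the source's branchheads cache is now copied along with it, so the destination does not have to recompute branch heads from scratch. A minimal standalone sketch of the same idea, using hypothetical srcstore/deststore paths and plain shutil instead of mercurial's util helpers:

    import os, shutil

    def copybranchcache(srcstore, deststore):
        # mirrors the logic added to clone() below; paths are hypothetical
        src = os.path.join(srcstore, 'cache', 'branchheads')
        dstdir = os.path.join(deststore, 'cache')
        if os.path.exists(src):
            if not os.path.exists(dstdir):
                os.mkdir(dstdir)
            shutil.copyfile(src, os.path.join(dstdir, 'branchheads'))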
@@ -1,600 +1,610 @@ mercurial/hg.py
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from i18n import _
10 10 from lock import release
11 11 from node import hex, nullid
12 12 import localrepo, bundlerepo, httppeer, sshpeer, statichttprepo, bookmarks
13 13 import lock, util, extensions, error, node, scmutil, phases
14 14 import cmdutil, discovery
15 15 import merge as mergemod
16 16 import verify as verifymod
17 17 import errno, os, shutil
18 18
19 19 def _local(path):
20 20 path = util.expandpath(util.urllocalpath(path))
21 21 return (os.path.isfile(path) and bundlerepo or localrepo)
22 22
23 23 def addbranchrevs(lrepo, other, branches, revs):
24 24 peer = other.peer() # a courtesy to callers using a localrepo for other
25 25 hashbranch, branches = branches
26 26 if not hashbranch and not branches:
27 27 return revs or None, revs and revs[0] or None
28 28 revs = revs and list(revs) or []
29 29 if not peer.capable('branchmap'):
30 30 if branches:
31 31 raise util.Abort(_("remote branch lookup not supported"))
32 32 revs.append(hashbranch)
33 33 return revs, revs[0]
34 34 branchmap = peer.branchmap()
35 35
36 36 def primary(branch):
37 37 if branch == '.':
38 38 if not lrepo:
39 39 raise util.Abort(_("dirstate branch not accessible"))
40 40 branch = lrepo.dirstate.branch()
41 41 if branch in branchmap:
42 42 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
43 43 return True
44 44 else:
45 45 return False
46 46
47 47 for branch in branches:
48 48 if not primary(branch):
49 49 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
50 50 if hashbranch:
51 51 if not primary(hashbranch):
52 52 revs.append(hashbranch)
53 53 return revs, revs[0]
54 54
55 55 def parseurl(path, branches=None):
56 56 '''parse url#branch, returning (url, (branch, branches))'''
57 57
58 58 u = util.url(path)
59 59 branch = None
60 60 if u.fragment:
61 61 branch = u.fragment
62 62 u.fragment = None
63 63 return str(u), (branch, branches or [])
64 64
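For illustration, a quick check of parseurl's contract (assuming util.url's usual fragment handling; the URL is a placeholder):

    url, (branch, branches) = parseurl('http://example.com/repo#stable')
    assert url == 'http://example.com/repo'
    assert branch == 'stable' and branches == []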
65 65 schemes = {
66 66 'bundle': bundlerepo,
67 67 'file': _local,
68 68 'http': httppeer,
69 69 'https': httppeer,
70 70 'ssh': sshpeer,
71 71 'static-http': statichttprepo,
72 72 }
73 73
74 74 def _peerlookup(path):
75 75 u = util.url(path)
76 76 scheme = u.scheme or 'file'
77 77 thing = schemes.get(scheme) or schemes['file']
78 78 try:
79 79 return thing(path)
80 80 except TypeError:
81 81 return thing
82 82
83 83 def islocal(repo):
84 84 '''return true if repo or path is local'''
85 85 if isinstance(repo, str):
86 86 try:
87 87 return _peerlookup(repo).islocal(repo)
88 88 except AttributeError:
89 89 return False
90 90 return repo.local()
91 91
92 92 def _peerorrepo(ui, path, create=False):
93 93 """return a repository object for the specified path"""
94 94 obj = _peerlookup(path).instance(ui, path, create)
95 95 ui = getattr(obj, "ui", ui)
96 96 for name, module in extensions.extensions():
97 97 hook = getattr(module, 'reposetup', None)
98 98 if hook:
99 99 hook(ui, obj)
100 100 return obj
101 101
102 102 def repository(ui, path='', create=False):
103 103 """return a repository object for the specified path"""
104 104 peer = _peerorrepo(ui, path, create)
105 105 repo = peer.local()
106 106 if not repo:
107 107 raise util.Abort(_("repository '%s' is not local") %
108 108 (path or peer.url()))
109 109 return repo
110 110
111 111 def peer(uiorrepo, opts, path, create=False):
112 112 '''return a repository peer for the specified path'''
113 113 rui = remoteui(uiorrepo, opts)
114 114 return _peerorrepo(rui, path, create).peer()
115 115
116 116 def defaultdest(source):
117 117 '''return default destination of clone if none is given'''
118 118 return os.path.basename(os.path.normpath(source))
119 119
120 120 def share(ui, source, dest=None, update=True):
121 121 '''create a shared repository'''
122 122
123 123 if not islocal(source):
124 124 raise util.Abort(_('can only share local repositories'))
125 125
126 126 if not dest:
127 127 dest = defaultdest(source)
128 128 else:
129 129 dest = ui.expandpath(dest)
130 130
131 131 if isinstance(source, str):
132 132 origsource = ui.expandpath(source)
133 133 source, branches = parseurl(origsource)
134 134 srcrepo = repository(ui, source)
135 135 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
136 136 else:
137 137 srcrepo = source.local()
138 138 origsource = source = srcrepo.url()
139 139 checkout = None
140 140
141 141 sharedpath = srcrepo.sharedpath # if our source is already sharing
142 142
143 143 root = os.path.realpath(dest)
144 144 roothg = os.path.join(root, '.hg')
145 145
146 146 if os.path.exists(roothg):
147 147 raise util.Abort(_('destination already exists'))
148 148
149 149 if not os.path.isdir(root):
150 150 os.mkdir(root)
151 151 util.makedir(roothg, notindexed=True)
152 152
153 153 requirements = ''
154 154 try:
155 155 requirements = srcrepo.opener.read('requires')
156 156 except IOError, inst:
157 157 if inst.errno != errno.ENOENT:
158 158 raise
159 159
160 160 requirements += 'shared\n'
161 161 util.writefile(os.path.join(roothg, 'requires'), requirements)
162 162 util.writefile(os.path.join(roothg, 'sharedpath'), sharedpath)
163 163
164 164 r = repository(ui, root)
165 165
166 166 default = srcrepo.ui.config('paths', 'default')
167 167 if default:
168 168 fp = r.opener("hgrc", "w", text=True)
169 169 fp.write("[paths]\n")
170 170 fp.write("default = %s\n" % default)
171 171 fp.close()
172 172
173 173 if update:
174 174 r.ui.status(_("updating working directory\n"))
175 175 if update is not True:
176 176 checkout = update
177 177 for test in (checkout, 'default', 'tip'):
178 178 if test is None:
179 179 continue
180 180 try:
181 181 uprev = r.lookup(test)
182 182 break
183 183 except error.RepoLookupError:
184 184 continue
185 185 _update(r, uprev)
186 186
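After share() runs, the destination holds a mostly-empty .hg that defers to the source's store; roughly (layout inferred from the writes above, paths hypothetical):

    # dest/.hg/requires    the source's requires plus a trailing 'shared' entry
    # dest/.hg/sharedpath  path of the source's .hg (or the share it points to)
    # dest/.hg/hgrc        '[paths]\ndefault = ...' if the source had a default path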
187 187 def copystore(ui, srcrepo, destpath):
188 188 '''copy files from the store of srcrepo to destpath
189 189
190 190 returns destlock
191 191 '''
192 192 destlock = None
193 193 try:
194 194 hardlink = None
195 195 num = 0
196 196 srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
197 197 for f in srcrepo.store.copylist():
198 198 if srcpublishing and f.endswith('phaseroots'):
199 199 continue
200 200 src = os.path.join(srcrepo.sharedpath, f)
201 201 dst = os.path.join(destpath, f)
202 202 dstbase = os.path.dirname(dst)
203 203 if dstbase and not os.path.exists(dstbase):
204 204 os.mkdir(dstbase)
205 205 if os.path.exists(src):
206 206 if dst.endswith('data'):
207 207 # lock to avoid premature writing to the target
208 208 destlock = lock.lock(os.path.join(dstbase, "lock"))
209 209 hardlink, n = util.copyfiles(src, dst, hardlink)
210 210 num += n
211 211 if hardlink:
212 212 ui.debug("linked %d files\n" % num)
213 213 else:
214 214 ui.debug("copied %d files\n" % num)
215 215 return destlock
216 216 except: # re-raises
217 217 release(destlock)
218 218 raise
219 219
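Note the hardlink tri-state threaded through copystore() above: it starts as None (meaning "try hardlinking, fall back to copying"), and util.copyfiles reports back which strategy actually worked, so every later file uses the same one. A hedged sketch of that contract, with a hypothetical pairs list:

    hardlink = None  # undecided until the first file is copied
    for src, dst in pairs:
        hardlink, n = util.copyfiles(src, dst, hardlink)
        # after the first call, hardlink is True or False and stays fixed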
220 220 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
221 221 update=True, stream=False, branch=None):
222 222 """Make a copy of an existing repository.
223 223
224 224 Create a copy of an existing repository in a new directory. The
225 225 source and destination are URLs, as passed to the repository
226 226 function. Returns a pair of repository peers, the source and
227 227 newly created destination.
228 228
229 229 The location of the source is added to the new repository's
230 230 .hg/hgrc file, as the default to be used for future pulls and
231 231 pushes.
232 232
233 233 If an exception is raised, the partly cloned/updated destination
234 234 repository will be deleted.
235 235
236 236 Arguments:
237 237
238 238 source: repository object or URL
239 239
240 240 dest: URL of destination repository to create (defaults to base
241 241 name of source repository)
242 242
243 243 pull: always pull from source repository, even in local case
244 244
245 245 stream: stream raw data uncompressed from repository (fast over
246 246 LAN, slow over WAN)
247 247
248 248 rev: revision to clone up to (implies pull=True)
249 249
250 250 update: update working directory after clone completes, if
251 251 destination is local repository (True means update to default rev,
252 252 anything else is treated as a revision)
253 253
254 254 branch: branches to clone
255 255 """
256 256
257 257 if isinstance(source, str):
258 258 origsource = ui.expandpath(source)
259 259 source, branch = parseurl(origsource, branch)
260 260 srcpeer = peer(ui, peeropts, source)
261 261 else:
262 262 srcpeer = source.peer() # in case we were called with a localrepo
263 263 branch = (None, branch or [])
264 264 origsource = source = srcpeer.url()
265 265 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
266 266
267 267 if dest is None:
268 268 dest = defaultdest(source)
269 269 ui.status(_("destination directory: %s\n") % dest)
270 270 else:
271 271 dest = ui.expandpath(dest)
272 272
273 273 dest = util.urllocalpath(dest)
274 274 source = util.urllocalpath(source)
275 275
276 276 if not dest:
277 277 raise util.Abort(_("empty destination path is not valid"))
278 278 if os.path.exists(dest):
279 279 if not os.path.isdir(dest):
280 280 raise util.Abort(_("destination '%s' already exists") % dest)
281 281 elif os.listdir(dest):
282 282 raise util.Abort(_("destination '%s' is not empty") % dest)
283 283
284 284 class DirCleanup(object):
285 285 def __init__(self, dir_):
286 286 self.rmtree = shutil.rmtree
287 287 self.dir_ = dir_
288 288 def close(self):
289 289 self.dir_ = None
290 290 def cleanup(self):
291 291 if self.dir_:
292 292 self.rmtree(self.dir_, True)
293 293
294 294 srclock = destlock = dircleanup = None
295 295 srcrepo = srcpeer.local()
296 296 try:
297 297 abspath = origsource
298 298 if islocal(origsource):
299 299 abspath = os.path.abspath(util.urllocalpath(origsource))
300 300
301 301 if islocal(dest):
302 302 dircleanup = DirCleanup(dest)
303 303
304 304 copy = False
305 305 if (srcrepo and srcrepo.cancopy() and islocal(dest)
306 306 and not phases.hassecret(srcrepo)):
307 307 copy = not pull and not rev
308 308
309 309 if copy:
310 310 try:
311 311 # we use a lock here because if we race with commit, we
312 312 # can end up with extra data in the cloned revlogs that's
313 313 # not pointed to by changesets, thus causing verify to
314 314 # fail
315 315 srclock = srcrepo.lock(wait=False)
316 316 except error.LockError:
317 317 copy = False
318 318
319 319 if copy:
320 320 srcrepo.hook('preoutgoing', throw=True, source='clone')
321 321 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
322 322 if not os.path.exists(dest):
323 323 os.mkdir(dest)
324 324 else:
325 325 # only clean up directories we create ourselves
326 326 dircleanup.dir_ = hgdir
327 327 try:
328 328 destpath = hgdir
329 329 util.makedir(destpath, notindexed=True)
330 330 except OSError, inst:
331 331 if inst.errno == errno.EEXIST:
332 332 dircleanup.close()
333 333 raise util.Abort(_("destination '%s' already exists")
334 334 % dest)
335 335 raise
336 336
337 337 destlock = copystore(ui, srcrepo, destpath)
338 338
339 # Recomputing branch cache might be slow on big repos,
340 # so just copy it
341 dstcachedir = os.path.join(destpath, 'cache')
342 srcbranchcache = srcrepo.sjoin('cache/branchheads')
343 dstbranchcache = os.path.join(dstcachedir, 'branchheads')
344 if os.path.exists(srcbranchcache):
345 if not os.path.exists(dstcachedir):
346 os.mkdir(dstcachedir)
347 util.copyfile(srcbranchcache, dstbranchcache)
348
339 349 # we need to re-init the repo after manually copying the data
340 350 # into it
341 351 destpeer = peer(ui, peeropts, dest)
342 352 srcrepo.hook('outgoing', source='clone',
343 353 node=node.hex(node.nullid))
344 354 else:
345 355 try:
346 356 destpeer = peer(ui, peeropts, dest, create=True)
347 357 except OSError, inst:
348 358 if inst.errno == errno.EEXIST:
349 359 dircleanup.close()
350 360 raise util.Abort(_("destination '%s' already exists")
351 361 % dest)
352 362 raise
353 363
354 364 revs = None
355 365 if rev:
356 366 if not srcpeer.capable('lookup'):
357 367 raise util.Abort(_("src repository does not support "
358 368 "revision lookup and so doesn't "
359 369 "support clone by revision"))
360 370 revs = [srcpeer.lookup(r) for r in rev]
361 371 checkout = revs[0]
362 372 if destpeer.local():
363 373 destpeer.local().clone(srcpeer, heads=revs, stream=stream)
364 374 elif srcrepo:
365 375 srcrepo.push(destpeer, revs=revs)
366 376 else:
367 377 raise util.Abort(_("clone from remote to remote not supported"))
368 378
369 379 if dircleanup:
370 380 dircleanup.close()
371 381
372 382 # clone all bookmarks except divergent ones
373 383 destrepo = destpeer.local()
374 384 if destrepo and srcpeer.capable("pushkey"):
375 385 rb = srcpeer.listkeys('bookmarks')
376 386 for k, n in rb.iteritems():
377 387 try:
378 388 m = destrepo.lookup(n)
379 389 destrepo._bookmarks[k] = m
380 390 except error.RepoLookupError:
381 391 pass
382 392 if rb:
383 393 bookmarks.write(destrepo)
384 394 elif srcrepo and destpeer.capable("pushkey"):
385 395 for k, n in srcrepo._bookmarks.iteritems():
386 396 destpeer.pushkey('bookmarks', k, '', hex(n))
387 397
388 398 if destrepo:
389 399 fp = destrepo.opener("hgrc", "w", text=True)
390 400 fp.write("[paths]\n")
391 401 u = util.url(abspath)
392 402 u.passwd = None
393 403 defaulturl = str(u)
394 404 fp.write("default = %s\n" % defaulturl)
395 405 fp.close()
396 406
397 407 destrepo.ui.setconfig('paths', 'default', defaulturl)
398 408
399 409 if update:
400 410 if update is not True:
401 411 checkout = srcpeer.lookup(update)
402 412 for test in (checkout, 'default', 'tip'):
403 413 if test is None:
404 414 continue
405 415 try:
406 416 uprev = destrepo.lookup(test)
407 417 break
408 418 except error.RepoLookupError:
409 419 continue
410 420 bn = destrepo[uprev].branch()
411 421 destrepo.ui.status(_("updating to branch %s\n") % bn)
412 422 _update(destrepo, uprev)
413 423 if update in destrepo._bookmarks:
414 424 bookmarks.setcurrent(destrepo, update)
415 425
416 426 return srcpeer, destpeer
417 427 finally:
418 428 release(srclock, destlock)
419 429 if dircleanup is not None:
420 430 dircleanup.cleanup()
421 431 if srcpeer is not None:
422 432 srcpeer.close()
423 433
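A hedged usage sketch of the clone() API documented above (the ui object, source URL and destination are placeholders):

    from mercurial import ui as uimod, hg

    u = uimod.ui()
    # pull everything up to 'tip' from a hypothetical source and update
    # the working directory; returns the (srcpeer, destpeer) pair
    srcpeer, destpeer = hg.clone(u, {}, 'http://example.com/repo',
                                 dest='repo-copy', rev=['tip'], update=True)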
424 434 def _showstats(repo, stats):
425 435 repo.ui.status(_("%d files updated, %d files merged, "
426 436 "%d files removed, %d files unresolved\n") % stats)
427 437
428 438 def update(repo, node):
429 439 """update the working directory to node, merging linear changes"""
430 440 stats = mergemod.update(repo, node, False, False, None)
431 441 _showstats(repo, stats)
432 442 if stats[3]:
433 443 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
434 444 return stats[3] > 0
435 445
436 446 # naming conflict in clone()
437 447 _update = update
438 448
439 449 def clean(repo, node, show_stats=True):
440 450 """forcibly switch the working directory to node, clobbering changes"""
441 451 stats = mergemod.update(repo, node, False, True, None)
442 452 if show_stats:
443 453 _showstats(repo, stats)
444 454 return stats[3] > 0
445 455
446 456 def merge(repo, node, force=None, remind=True):
447 457 """Branch merge with node, resolving changes. Return true if any
448 458 unresolved conflicts remain."""
449 459 stats = mergemod.update(repo, node, True, force, False)
450 460 _showstats(repo, stats)
451 461 if stats[3]:
452 462 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
453 463 "or 'hg update -C .' to abandon\n"))
454 464 elif remind:
455 465 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
456 466 return stats[3] > 0
457 467
458 468 def _incoming(displaychlist, subreporecurse, ui, repo, source,
459 469 opts, buffered=False):
460 470 """
461 471 Helper for incoming / gincoming.
462 472 displaychlist gets called with
463 473 (remoterepo, incomingchangesetlist, displayer) parameters,
464 474 and is supposed to contain only code that can't be unified.
465 475 """
466 476 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
467 477 other = peer(repo, opts, source)
468 478 ui.status(_('comparing with %s\n') % util.hidepassword(source))
469 479 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
470 480
471 481 if revs:
472 482 revs = [other.lookup(rev) for rev in revs]
473 483 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
474 484 revs, opts["bundle"], opts["force"])
475 485 try:
476 486 if not chlist:
477 487 ui.status(_("no changes found\n"))
478 488 return subreporecurse()
479 489
480 490 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
481 491
482 492 # XXX once graphlog extension makes it into core,
483 493 # should be replaced by an if graph/else
484 494 displaychlist(other, chlist, displayer)
485 495
486 496 displayer.close()
487 497 finally:
488 498 cleanupfn()
489 499 subreporecurse()
490 500 return 0 # exit code is zero since we found incoming changes
491 501
492 502 def incoming(ui, repo, source, opts):
493 503 def subreporecurse():
494 504 ret = 1
495 505 if opts.get('subrepos'):
496 506 ctx = repo[None]
497 507 for subpath in sorted(ctx.substate):
498 508 sub = ctx.sub(subpath)
499 509 ret = min(ret, sub.incoming(ui, source, opts))
500 510 return ret
501 511
502 512 def display(other, chlist, displayer):
503 513 limit = cmdutil.loglimit(opts)
504 514 if opts.get('newest_first'):
505 515 chlist.reverse()
506 516 count = 0
507 517 for n in chlist:
508 518 if limit is not None and count >= limit:
509 519 break
510 520 parents = [p for p in other.changelog.parents(n) if p != nullid]
511 521 if opts.get('no_merges') and len(parents) == 2:
512 522 continue
513 523 count += 1
514 524 displayer.show(other[n])
515 525 return _incoming(display, subreporecurse, ui, repo, source, opts)
516 526
517 527 def _outgoing(ui, repo, dest, opts):
518 528 dest = ui.expandpath(dest or 'default-push', dest or 'default')
519 529 dest, branches = parseurl(dest, opts.get('branch'))
520 530 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
521 531 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
522 532 if revs:
523 533 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
524 534
525 535 other = peer(repo, opts, dest)
526 536 outgoing = discovery.findcommonoutgoing(repo, other, revs,
527 537 force=opts.get('force'))
528 538 o = outgoing.missing
529 539 if not o:
530 540 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
531 541 return None
532 542 return o
533 543
534 544 def outgoing(ui, repo, dest, opts):
535 545 def recurse():
536 546 ret = 1
537 547 if opts.get('subrepos'):
538 548 ctx = repo[None]
539 549 for subpath in sorted(ctx.substate):
540 550 sub = ctx.sub(subpath)
541 551 ret = min(ret, sub.outgoing(ui, dest, opts))
542 552 return ret
543 553
544 554 limit = cmdutil.loglimit(opts)
545 555 o = _outgoing(ui, repo, dest, opts)
546 556 if o is None:
547 557 return recurse()
548 558
549 559 if opts.get('newest_first'):
550 560 o.reverse()
551 561 displayer = cmdutil.show_changeset(ui, repo, opts)
552 562 count = 0
553 563 for n in o:
554 564 if limit is not None and count >= limit:
555 565 break
556 566 parents = [p for p in repo.changelog.parents(n) if p != nullid]
557 567 if opts.get('no_merges') and len(parents) == 2:
558 568 continue
559 569 count += 1
560 570 displayer.show(repo[n])
561 571 displayer.close()
562 572 recurse()
563 573 return 0 # exit code is zero since we found outgoing changes
564 574
565 575 def revert(repo, node, choose):
566 576 """revert changes to revision in node without updating dirstate"""
567 577 return mergemod.update(repo, node, False, True, choose)[3] > 0
568 578
569 579 def verify(repo):
570 580 """verify the consistency of a repository"""
571 581 return verifymod.verify(repo)
572 582
573 583 def remoteui(src, opts):
574 584 'build a remote ui from ui or repo and opts'
575 585 if util.safehasattr(src, 'baseui'): # looks like a repository
576 586 dst = src.baseui.copy() # drop repo-specific config
577 587 src = src.ui # copy target options from repo
578 588 else: # assume it's a global ui object
579 589 dst = src.copy() # keep all global options
580 590
581 591 # copy ssh-specific options
582 592 for o in 'ssh', 'remotecmd':
583 593 v = opts.get(o) or src.config('ui', o)
584 594 if v:
585 595 dst.setconfig("ui", o, v)
586 596
587 597 # copy bundle-specific options
588 598 r = src.config('bundle', 'mainreporoot')
589 599 if r:
590 600 dst.setconfig('bundle', 'mainreporoot', r)
591 601
592 602 # copy selected local settings to the remote ui
593 603 for sect in ('auth', 'hostfingerprints', 'http_proxy'):
594 604 for key, val in src.configitems(sect):
595 605 dst.setconfig(sect, key, val)
596 606 v = src.config('web', 'cacerts')
597 607 if v:
598 608 dst.setconfig('web', 'cacerts', util.expandpath(v))
599 609
600 610 return dst
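remoteui() above copies only a whitelist of settings into the ui used for remote operations: ui.ssh and ui.remotecmd, bundle.mainreporoot, the auth/hostfingerprints/http_proxy sections, and web.cacerts. For illustration (the option values are placeholders; opts entries win over config):

    rui = remoteui(repo, {'ssh': 'ssh -C', 'remotecmd': 'hg'})
    # rui now carries ui.ssh='ssh -C' and ui.remotecmd='hg', plus any
    # [auth], [hostfingerprints] and [http_proxy] items and web.cacerts
    # from the local config; repo-specific configuration is dropped.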
@@ -1,2620 +1,2637 @@ mercurial/localrepo.py
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
26 26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
27 27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28 28
29 29 class localpeer(peer.peerrepository):
30 30 '''peer for a local repo; reflects only the most recent API'''
31 31
32 32 def __init__(self, repo, caps=MODERNCAPS):
33 33 peer.peerrepository.__init__(self)
34 34 self._repo = repo
35 35 self.ui = repo.ui
36 36 self._caps = repo._restrictcapabilities(caps)
37 37 self.requirements = repo.requirements
38 38 self.supportedformats = repo.supportedformats
39 39
40 40 def close(self):
41 41 self._repo.close()
42 42
43 43 def _capabilities(self):
44 44 return self._caps
45 45
46 46 def local(self):
47 47 return self._repo
48 48
49 49 def canpush(self):
50 50 return True
51 51
52 52 def url(self):
53 53 return self._repo.url()
54 54
55 55 def lookup(self, key):
56 56 return self._repo.lookup(key)
57 57
58 58 def branchmap(self):
59 59 return discovery.visiblebranchmap(self._repo)
60 60
61 61 def heads(self):
62 62 return discovery.visibleheads(self._repo)
63 63
64 64 def known(self, nodes):
65 65 return self._repo.known(nodes)
66 66
67 67 def getbundle(self, source, heads=None, common=None):
68 68 return self._repo.getbundle(source, heads=heads, common=common)
69 69
70 70 # TODO We might want to move the next two calls into legacypeer and add
71 71 # unbundle instead.
72 72
73 73 def lock(self):
74 74 return self._repo.lock()
75 75
76 76 def addchangegroup(self, cg, source, url):
77 77 return self._repo.addchangegroup(cg, source, url)
78 78
79 79 def pushkey(self, namespace, key, old, new):
80 80 return self._repo.pushkey(namespace, key, old, new)
81 81
82 82 def listkeys(self, namespace):
83 83 return self._repo.listkeys(namespace)
84 84
85 85 def debugwireargs(self, one, two, three=None, four=None, five=None):
86 86 '''used to test argument passing over the wire'''
87 87 return "%s %s %s %s %s" % (one, two, three, four, five)
88 88
89 89 class locallegacypeer(localpeer):
90 90 '''peer extension which implements legacy methods too; used for tests with
91 91 restricted capabilities'''
92 92
93 93 def __init__(self, repo):
94 94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
95 95
96 96 def branches(self, nodes):
97 97 return self._repo.branches(nodes)
98 98
99 99 def between(self, pairs):
100 100 return self._repo.between(pairs)
101 101
102 102 def changegroup(self, basenodes, source):
103 103 return self._repo.changegroup(basenodes, source)
104 104
105 105 def changegroupsubset(self, bases, heads, source):
106 106 return self._repo.changegroupsubset(bases, heads, source)
107 107
108 108 class localrepository(object):
109 109
110 110 supportedformats = set(('revlogv1', 'generaldelta'))
111 111 supported = supportedformats | set(('store', 'fncache', 'shared',
112 112 'dotencode'))
113 113 openerreqs = set(('revlogv1', 'generaldelta'))
114 114 requirements = ['revlogv1']
115 115
116 116 def _baserequirements(self, create):
117 117 return self.requirements[:]
118 118
119 119 def __init__(self, baseui, path=None, create=False):
120 120 self.wvfs = scmutil.vfs(path, expand=True)
121 121 self.wopener = self.wvfs
122 122 self.root = self.wvfs.base
123 123 self.path = self.wvfs.join(".hg")
124 124 self.origroot = path
125 125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
126 126 self.vfs = scmutil.vfs(self.path)
127 127 self.opener = self.vfs
128 128 self.baseui = baseui
129 129 self.ui = baseui.copy()
130 130 # A list of callbacks to shape the phases if no data were found.
131 131 # Callbacks are in the form: func(repo, roots) --> processed root.
132 132 # This list is to be filled by extensions during repo setup.
133 133 self._phasedefaults = []
134 134 try:
135 135 self.ui.readconfig(self.join("hgrc"), self.root)
136 136 extensions.loadall(self.ui)
137 137 except IOError:
138 138 pass
139 139
140 140 if not self.vfs.isdir():
141 141 if create:
142 142 if not self.wvfs.exists():
143 143 self.wvfs.makedirs()
144 144 self.vfs.makedir(notindexed=True)
145 145 requirements = self._baserequirements(create)
146 146 if self.ui.configbool('format', 'usestore', True):
147 147 self.vfs.mkdir("store")
148 148 requirements.append("store")
149 149 if self.ui.configbool('format', 'usefncache', True):
150 150 requirements.append("fncache")
151 151 if self.ui.configbool('format', 'dotencode', True):
152 152 requirements.append('dotencode')
153 153 # create an invalid changelog
154 154 self.vfs.append(
155 155 "00changelog.i",
156 156 '\0\0\0\2' # represents revlogv2
157 157 ' dummy changelog to prevent using the old repo layout'
158 158 )
159 159 if self.ui.configbool('format', 'generaldelta', False):
160 160 requirements.append("generaldelta")
161 161 requirements = set(requirements)
162 162 else:
163 163 raise error.RepoError(_("repository %s not found") % path)
164 164 elif create:
165 165 raise error.RepoError(_("repository %s already exists") % path)
166 166 else:
167 167 try:
168 168 requirements = scmutil.readrequires(self.vfs, self.supported)
169 169 except IOError, inst:
170 170 if inst.errno != errno.ENOENT:
171 171 raise
172 172 requirements = set()
173 173
174 174 self.sharedpath = self.path
175 175 try:
176 176 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
177 177 if not os.path.exists(s):
178 178 raise error.RepoError(
179 179 _('.hg/sharedpath points to nonexistent directory %s') % s)
180 180 self.sharedpath = s
181 181 except IOError, inst:
182 182 if inst.errno != errno.ENOENT:
183 183 raise
184 184
185 185 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
186 186 self.spath = self.store.path
187 187 self.svfs = self.store.vfs
188 188 self.sopener = self.svfs
189 189 self.sjoin = self.store.join
190 190 self.vfs.createmode = self.store.createmode
191 191 self._applyrequirements(requirements)
192 192 if create:
193 193 self._writerequirements()
194 194
195 195
196 196 self._branchcache = None
197 197 self._branchcachetip = None
198 198 self.filterpats = {}
199 199 self._datafilters = {}
200 200 self._transref = self._lockref = self._wlockref = None
201 201
202 202 # A cache for various files under .hg/ that tracks file changes,
203 203 # (used by the filecache decorator)
204 204 #
205 205 # Maps a property name to its util.filecacheentry
206 206 self._filecache = {}
207 207
208 208 def close(self):
209 209 pass
210 210
211 211 def _restrictcapabilities(self, caps):
212 212 return caps
213 213
214 214 def _applyrequirements(self, requirements):
215 215 self.requirements = requirements
216 216 self.sopener.options = dict((r, 1) for r in requirements
217 217 if r in self.openerreqs)
218 218
219 219 def _writerequirements(self):
220 220 reqfile = self.opener("requires", "w")
221 221 for r in self.requirements:
222 222 reqfile.write("%s\n" % r)
223 223 reqfile.close()
224 224
225 225 def _checknested(self, path):
226 226 """Determine if path is a legal nested repository."""
227 227 if not path.startswith(self.root):
228 228 return False
229 229 subpath = path[len(self.root) + 1:]
230 230 normsubpath = util.pconvert(subpath)
231 231
232 232 # XXX: Checking against the current working copy is wrong in
233 233 # the sense that it can reject things like
234 234 #
235 235 # $ hg cat -r 10 sub/x.txt
236 236 #
237 237 # if sub/ is no longer a subrepository in the working copy
238 238 # parent revision.
239 239 #
240 240 # However, it can of course also allow things that would have
241 241 # been rejected before, such as the above cat command if sub/
242 242 # is a subrepository now, but was a normal directory before.
243 243 # The old path auditor would have rejected by mistake since it
244 244 # panics when it sees sub/.hg/.
245 245 #
246 246 # All in all, checking against the working copy seems sensible
247 247 # since we want to prevent access to nested repositories on
248 248 # the filesystem *now*.
249 249 ctx = self[None]
250 250 parts = util.splitpath(subpath)
251 251 while parts:
252 252 prefix = '/'.join(parts)
253 253 if prefix in ctx.substate:
254 254 if prefix == normsubpath:
255 255 return True
256 256 else:
257 257 sub = ctx.sub(prefix)
258 258 return sub.checknested(subpath[len(prefix) + 1:])
259 259 else:
260 260 parts.pop()
261 261 return False
262 262
263 263 def peer(self):
264 264 return localpeer(self) # not cached to avoid reference cycle
265 265
266 266 @filecache('bookmarks')
267 267 def _bookmarks(self):
268 268 return bookmarks.read(self)
269 269
270 270 @filecache('bookmarks.current')
271 271 def _bookmarkcurrent(self):
272 272 return bookmarks.readcurrent(self)
273 273
274 274 def _writebookmarks(self, marks):
275 275 bookmarks.write(self)
276 276
277 277 def bookmarkheads(self, bookmark):
278 278 name = bookmark.split('@', 1)[0]
279 279 heads = []
280 280 for mark, n in self._bookmarks.iteritems():
281 281 if mark.split('@', 1)[0] == name:
282 282 heads.append(n)
283 283 return heads
284 284
285 285 @storecache('phaseroots')
286 286 def _phasecache(self):
287 287 return phases.phasecache(self, self._phasedefaults)
288 288
289 289 @storecache('obsstore')
290 290 def obsstore(self):
291 291 store = obsolete.obsstore(self.sopener)
292 292 if store and not obsolete._enabled:
293 293 # message is rare enough to not be translated
294 294 msg = 'obsolete feature not enabled but %i markers found!\n'
295 295 self.ui.warn(msg % len(list(store)))
296 296 return store
297 297
298 298 @propertycache
299 299 def hiddenrevs(self):
300 300 """hiddenrevs: revs that should be hidden by command and tools
301 301
302 302 This set is carried on the repo to ease initialization and lazy
303 303 loading; it'll probably move back to changelog for efficiency and
304 304 consistency reasons.
305 305
306 306 Note that hiddenrevs needs invalidation when
307 307 - a new changeset is added (possibly unstable above extinct)
308 308 - a new obsolete marker is added (possibly a new extinct changeset)
309 309
310 310 hidden changesets cannot have non-hidden descendants
311 311 """
312 312 hidden = set()
313 313 if self.obsstore:
314 314 ### hide extinct changesets that are not accessible by any means
315 315 hiddenquery = 'extinct() - ::(. + bookmark())'
316 316 hidden.update(self.revs(hiddenquery))
317 317 return hidden
318 318
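The hidden set above is simply the result of a revset query; an equivalent hedged one-liner using the revs() helper defined further down in this class:

    # extinct changesets unreachable from the working parent or any bookmark
    hidden = set(repo.revs('extinct() - ::(. + bookmark())'))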
319 319 @storecache('00changelog.i')
320 320 def changelog(self):
321 321 c = changelog.changelog(self.sopener)
322 322 if 'HG_PENDING' in os.environ:
323 323 p = os.environ['HG_PENDING']
324 324 if p.startswith(self.root):
325 325 c.readpending('00changelog.i.a')
326 326 return c
327 327
328 328 @storecache('00manifest.i')
329 329 def manifest(self):
330 330 return manifest.manifest(self.sopener)
331 331
332 332 @filecache('dirstate')
333 333 def dirstate(self):
334 334 warned = [0]
335 335 def validate(node):
336 336 try:
337 337 self.changelog.rev(node)
338 338 return node
339 339 except error.LookupError:
340 340 if not warned[0]:
341 341 warned[0] = True
342 342 self.ui.warn(_("warning: ignoring unknown"
343 343 " working parent %s!\n") % short(node))
344 344 return nullid
345 345
346 346 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
347 347
348 348 def __getitem__(self, changeid):
349 349 if changeid is None:
350 350 return context.workingctx(self)
351 351 return context.changectx(self, changeid)
352 352
353 353 def __contains__(self, changeid):
354 354 try:
355 355 return bool(self.lookup(changeid))
356 356 except error.RepoLookupError:
357 357 return False
358 358
359 359 def __nonzero__(self):
360 360 return True
361 361
362 362 def __len__(self):
363 363 return len(self.changelog)
364 364
365 365 def __iter__(self):
366 366 return iter(self.changelog)
367 367
368 368 def revs(self, expr, *args):
369 369 '''Return a list of revisions matching the given revset'''
370 370 expr = revset.formatspec(expr, *args)
371 371 m = revset.match(None, expr)
372 372 return [r for r in m(self, list(self))]
373 373
374 374 def set(self, expr, *args):
375 375 '''
376 376 Yield a context for each matching revision, after doing arg
377 377 replacement via revset.formatspec
378 378 '''
379 379 for r in self.revs(expr, *args):
380 380 yield self[r]
381 381
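revs() and set() run a revset expression with printf-style argument quoting via revset.formatspec; for example (revset and branch name are placeholders, %s substitutes a quoted string):

    # iterate over all non-merge changesets on a named branch
    for ctx in repo.set('branch(%s) and not merge()', 'default'):
        print ctx.hex()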
382 382 def url(self):
383 383 return 'file:' + self.root
384 384
385 385 def hook(self, name, throw=False, **args):
386 386 return hook.hook(self.ui, self, name, throw, **args)
387 387
388 388 tag_disallowed = ':\r\n'
389 389
390 390 def _tag(self, names, node, message, local, user, date, extra={}):
391 391 if isinstance(names, str):
392 392 allchars = names
393 393 names = (names,)
394 394 else:
395 395 allchars = ''.join(names)
396 396 for c in self.tag_disallowed:
397 397 if c in allchars:
398 398 raise util.Abort(_('%r cannot be used in a tag name') % c)
399 399
400 400 branches = self.branchmap()
401 401 for name in names:
402 402 self.hook('pretag', throw=True, node=hex(node), tag=name,
403 403 local=local)
404 404 if name in branches:
405 405 self.ui.warn(_("warning: tag %s conflicts with existing"
406 406 " branch name\n") % name)
407 407
408 408 def writetags(fp, names, munge, prevtags):
409 409 fp.seek(0, 2)
410 410 if prevtags and prevtags[-1] != '\n':
411 411 fp.write('\n')
412 412 for name in names:
413 413 m = munge and munge(name) or name
414 414 if (self._tagscache.tagtypes and
415 415 name in self._tagscache.tagtypes):
416 416 old = self.tags().get(name, nullid)
417 417 fp.write('%s %s\n' % (hex(old), m))
418 418 fp.write('%s %s\n' % (hex(node), m))
419 419 fp.close()
420 420
421 421 prevtags = ''
422 422 if local:
423 423 try:
424 424 fp = self.opener('localtags', 'r+')
425 425 except IOError:
426 426 fp = self.opener('localtags', 'a')
427 427 else:
428 428 prevtags = fp.read()
429 429
430 430 # local tags are stored in the current charset
431 431 writetags(fp, names, None, prevtags)
432 432 for name in names:
433 433 self.hook('tag', node=hex(node), tag=name, local=local)
434 434 return
435 435
436 436 try:
437 437 fp = self.wfile('.hgtags', 'rb+')
438 438 except IOError, e:
439 439 if e.errno != errno.ENOENT:
440 440 raise
441 441 fp = self.wfile('.hgtags', 'ab')
442 442 else:
443 443 prevtags = fp.read()
444 444
445 445 # committed tags are stored in UTF-8
446 446 writetags(fp, names, encoding.fromlocal, prevtags)
447 447
448 448 fp.close()
449 449
450 450 self.invalidatecaches()
451 451
452 452 if '.hgtags' not in self.dirstate:
453 453 self[None].add(['.hgtags'])
454 454
455 455 m = matchmod.exact(self.root, '', ['.hgtags'])
456 456 tagnode = self.commit(message, user, date, extra=extra, match=m)
457 457
458 458 for name in names:
459 459 self.hook('tag', node=hex(node), tag=name, local=local)
460 460
461 461 return tagnode
462 462
463 463 def tag(self, names, node, message, local, user, date):
464 464 '''tag a revision with one or more symbolic names.
465 465
466 466 names is a list of strings or, when adding a single tag, names may be a
467 467 string.
468 468
469 469 if local is True, the tags are stored in a per-repository file.
470 470 otherwise, they are stored in the .hgtags file, and a new
471 471 changeset is committed with the change.
472 472
473 473 keyword arguments:
474 474
475 475 local: whether to store tags in non-version-controlled file
476 476 (default False)
477 477
478 478 message: commit message to use if committing
479 479
480 480 user: name of user to use if committing
481 481
482 482 date: date tuple to use if committing'''
483 483
484 484 if not local:
485 485 for x in self.status()[:5]:
486 486 if '.hgtags' in x:
487 487 raise util.Abort(_('working copy of .hgtags is changed '
488 488 '(please commit .hgtags manually)'))
489 489
490 490 self.tags() # instantiate the cache
491 491 self._tag(names, node, message, local, user, date)
492 492
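writetags() above appends one record per tag, and .hgtags (UTF-8, version-controlled) and .hg/localtags (local encoding) share the same line format. Schematically (hex value invented for illustration):

    # <40-digit hex changeset id> <tag name>, e.g.
    # 0123456789abcdef0123456789abcdef01234567 release-1.0
    # retagging first writes a line for the tag's old node, then one for
    # the new node, so the tag's history is kept (see the hex(old) write above)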
493 493 @propertycache
494 494 def _tagscache(self):
495 495 '''Returns a tagscache object that contains various tags related
496 496 caches.'''
497 497
498 498 # This simplifies its cache management by having one decorated
499 499 # function (this one) and the rest simply fetch things from it.
500 500 class tagscache(object):
501 501 def __init__(self):
502 502 # These two define the set of tags for this repository. tags
503 503 # maps tag name to node; tagtypes maps tag name to 'global' or
504 504 # 'local'. (Global tags are defined by .hgtags across all
505 505 # heads, and local tags are defined in .hg/localtags.)
506 506 # They constitute the in-memory cache of tags.
507 507 self.tags = self.tagtypes = None
508 508
509 509 self.nodetagscache = self.tagslist = None
510 510
511 511 cache = tagscache()
512 512 cache.tags, cache.tagtypes = self._findtags()
513 513
514 514 return cache
515 515
516 516 def tags(self):
517 517 '''return a mapping of tag to node'''
518 518 t = {}
519 519 if self.changelog.filteredrevs:
520 520 tags, tt = self._findtags()
521 521 else:
522 522 tags = self._tagscache.tags
523 523 for k, v in tags.iteritems():
524 524 try:
525 525 # ignore tags to unknown nodes
526 526 self.changelog.rev(v)
527 527 t[k] = v
528 528 except (error.LookupError, ValueError):
529 529 pass
530 530 return t
531 531
532 532 def _findtags(self):
533 533 '''Do the hard work of finding tags. Return a pair of dicts
534 534 (tags, tagtypes) where tags maps tag name to node, and tagtypes
535 535 maps tag name to a string like \'global\' or \'local\'.
536 536 Subclasses or extensions are free to add their own tags, but
537 537 should be aware that the returned dicts will be retained for the
538 538 duration of the localrepo object.'''
539 539
540 540 # XXX what tagtype should subclasses/extensions use? Currently
541 541 # mq and bookmarks add tags, but do not set the tagtype at all.
542 542 # Should each extension invent its own tag type? Should there
543 543 # be one tagtype for all such "virtual" tags? Or is the status
544 544 # quo fine?
545 545
546 546 alltags = {} # map tag name to (node, hist)
547 547 tagtypes = {}
548 548
549 549 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
550 550 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
551 551
552 552 # Build the return dicts. Have to re-encode tag names because
553 553 # the tags module always uses UTF-8 (in order not to lose info
554 554 # writing to the cache), but the rest of Mercurial wants them in
555 555 # local encoding.
556 556 tags = {}
557 557 for (name, (node, hist)) in alltags.iteritems():
558 558 if node != nullid:
559 559 tags[encoding.tolocal(name)] = node
560 560 tags['tip'] = self.changelog.tip()
561 561 tagtypes = dict([(encoding.tolocal(name), value)
562 562 for (name, value) in tagtypes.iteritems()])
563 563 return (tags, tagtypes)
564 564
565 565 def tagtype(self, tagname):
566 566 '''
567 567 return the type of the given tag. result can be:
568 568
569 569 'local' : a local tag
570 570 'global' : a global tag
571 571 None : tag does not exist
572 572 '''
573 573
574 574 return self._tagscache.tagtypes.get(tagname)
575 575
576 576 def tagslist(self):
577 577 '''return a list of tags ordered by revision'''
578 578 if not self._tagscache.tagslist:
579 579 l = []
580 580 for t, n in self.tags().iteritems():
581 581 r = self.changelog.rev(n)
582 582 l.append((r, t, n))
583 583 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
584 584
585 585 return self._tagscache.tagslist
586 586
587 587 def nodetags(self, node):
588 588 '''return the tags associated with a node'''
589 589 if not self._tagscache.nodetagscache:
590 590 nodetagscache = {}
591 591 for t, n in self._tagscache.tags.iteritems():
592 592 nodetagscache.setdefault(n, []).append(t)
593 593 for tags in nodetagscache.itervalues():
594 594 tags.sort()
595 595 self._tagscache.nodetagscache = nodetagscache
596 596 return self._tagscache.nodetagscache.get(node, [])
597 597
598 598 def nodebookmarks(self, node):
599 599 marks = []
600 600 for bookmark, n in self._bookmarks.iteritems():
601 601 if n == node:
602 602 marks.append(bookmark)
603 603 return sorted(marks)
604 604
605 605 def _branchtags(self, partial, lrev):
606 606 # TODO: rename this function?
607 607 tiprev = len(self) - 1
608 608 if lrev != tiprev:
609 609 ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
610 610 self._updatebranchcache(partial, ctxgen)
611 611 self._writebranchcache(partial, self.changelog.tip(), tiprev)
612 612
613 613 return partial
614 614
615 615 def updatebranchcache(self):
616 616 tip = self.changelog.tip()
617 617 if self._branchcache is not None and self._branchcachetip == tip:
618 618 return
619 619
620 620 oldtip = self._branchcachetip
621 621 self._branchcachetip = tip
622 622 if oldtip is None or oldtip not in self.changelog.nodemap:
623 623 partial, last, lrev = self._readbranchcache()
624 624 else:
625 625 lrev = self.changelog.rev(oldtip)
626 626 partial = self._branchcache
627 627
628 628 self._branchtags(partial, lrev)
629 629 # this private cache holds all heads (not just the branch tips)
630 630 self._branchcache = partial
631 631
632 632 def branchmap(self):
633 633 '''returns a dictionary {branch: [branchheads]}'''
634 634 if self.changelog.filteredrevs:
635 635 # some changesets are excluded; we can't use the cache
636 636 branchmap = {}
637 637 self._updatebranchcache(branchmap, (self[r] for r in self))
638 638 return branchmap
639 639 else:
640 640 self.updatebranchcache()
641 641 return self._branchcache
642 642
643 643
644 644 def _branchtip(self, heads):
645 645 '''return the tipmost branch head in heads'''
646 646 tip = heads[-1]
647 647 for h in reversed(heads):
648 648 if not self[h].closesbranch():
649 649 tip = h
650 650 break
651 651 return tip
652 652
653 653 def branchtip(self, branch):
654 654 '''return the tip node for a given branch'''
655 655 if branch not in self.branchmap():
656 656 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
657 657 return self._branchtip(self.branchmap()[branch])
658 658
659 659 def branchtags(self):
660 660 '''return a dict where branch names map to the tipmost head of
661 661 the branch, open heads come before closed'''
662 662 bt = {}
663 663 for bn, heads in self.branchmap().iteritems():
664 664 bt[bn] = self._branchtip(heads)
665 665 return bt
666 666
667 667 def _readbranchcache(self):
668 668 partial = {}
669 669 try:
670 670 f = self.opener("cache/branchheads")
671 671 lines = f.read().split('\n')
672 672 f.close()
673 673 except (IOError, OSError):
674 674 return {}, nullid, nullrev
675 675
676 676 try:
677 677 last, lrev = lines.pop(0).split(" ", 1)
678 678 last, lrev = bin(last), int(lrev)
679 679 if lrev >= len(self) or self[lrev].node() != last:
680 680 # invalidate the cache
681 681 raise ValueError('invalidating branch cache (tip differs)')
682 682 for l in lines:
683 683 if not l:
684 684 continue
685 685 node, label = l.split(" ", 1)
686 686 label = encoding.tolocal(label.strip())
687 687 if not node in self:
688 688 raise ValueError('invalidating branch cache because node '+
689 689 '%s does not exist' % node)
690 690 partial.setdefault(label, []).append(bin(node))
691 691 except KeyboardInterrupt:
692 692 raise
693 693 except Exception, inst:
694 694 if self.ui.debugflag:
695 695 self.ui.warn(str(inst), '\n')
696 696 partial, last, lrev = {}, nullid, nullrev
697 697 return partial, last, lrev
698 698
699 699 def _writebranchcache(self, branches, tip, tiprev):
700 700 try:
701 701 f = self.opener("cache/branchheads", "w", atomictemp=True)
702 702 f.write("%s %s\n" % (hex(tip), tiprev))
703 703 for label, nodes in branches.iteritems():
704 704 for node in nodes:
705 705 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
706 706 f.close()
707 707 except (IOError, OSError):
708 708 pass
709 709
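_readbranchcache() and _writebranchcache() above define the on-disk format of the branchheads cache, the same file the clone change in hg.py copies. Schematically:

    # <tip hex> <tip rev>            validation header
    # <head hex> <branch name>       one line per branch head
    # ...
    # the whole cache is discarded if the recorded tip no longer
    # matches the repository (see the ValueError paths above)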
710 710 def _updatebranchcache(self, partial, ctxgen):
711 711 """Given a branchhead cache, partial, that may have extra nodes or be
712 712 missing heads, and a generator of nodes that are at least a superset of
713 713 heads missing, this function updates partial to be correct.
714 714 """
715 715 # collect new branch entries
716 716 newbranches = {}
717 717 for c in ctxgen:
718 718 newbranches.setdefault(c.branch(), []).append(c.node())
719 719 # if older branchheads are reachable from new ones, they aren't
720 720 # really branchheads. Note checking parents is insufficient:
721 721 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
722 722 for branch, newnodes in newbranches.iteritems():
723 723 bheads = partial.setdefault(branch, [])
724 724 # Remove candidate heads that no longer are in the repo (e.g., as
725 725 # the result of a strip that just happened). Avoid using 'node in
726 726 # self' here because that dives down into branchcache code somewhat
727 727 # recursively.
728 728 bheadrevs = [self.changelog.rev(node) for node in bheads
729 729 if self.changelog.hasnode(node)]
730 730 newheadrevs = [self.changelog.rev(node) for node in newnodes
731 731 if self.changelog.hasnode(node)]
732 732 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
733 733 # Remove duplicates - nodes that are in newheadrevs and are already
734 734 # in bheadrevs. This can happen if you strip a node whose parent
735 735 # was already a head (because they're on different branches).
736 736 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
737 737
738 738 # Starting from tip means fewer passes over reachable. If we know
739 739 # the new candidates are not ancestors of existing heads, we don't
740 740 # have to examine ancestors of existing heads
741 741 if ctxisnew:
742 742 iterrevs = sorted(newheadrevs)
743 743 else:
744 744 iterrevs = list(bheadrevs)
745 745
746 746 # This loop prunes out two kinds of heads - heads that are
747 747 # superseded by a head in newheadrevs, and newheadrevs that are not
748 748 # heads because an existing head is their descendant.
749 749 while iterrevs:
750 750 latest = iterrevs.pop()
751 751 if latest not in bheadrevs:
752 752 continue
753 753 ancestors = set(self.changelog.ancestors([latest],
754 754 bheadrevs[0]))
755 755 if ancestors:
756 756 bheadrevs = [b for b in bheadrevs if b not in ancestors]
757 757 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
758 758
759 759 # There may be branches that cease to exist when the last commit in the
760 760 # branch was stripped. This code filters them out. Note that the
761 761 # branch that ceased to exist may not be in newbranches because
762 762 # newbranches is the set of candidate heads, which when you strip the
763 763 # last commit in a branch will be the parent branch.
764 764 for branch in partial.keys():
765 765 nodes = [head for head in partial[branch]
766 766 if self.changelog.hasnode(head)]
767 767 if not nodes:
768 768 del partial[branch]
769 769
770 770 def lookup(self, key):
771 771 return self[key].node()
772 772
773 773 def lookupbranch(self, key, remote=None):
774 774 repo = remote or self
775 775 if key in repo.branchmap():
776 776 return key
777 777
778 778 repo = (remote and remote.local()) and remote or self
779 779 return repo[key].branch()
780 780
781 781 def known(self, nodes):
782 782 nm = self.changelog.nodemap
783 783 pc = self._phasecache
784 784 result = []
785 785 for n in nodes:
786 786 r = nm.get(n)
787 787 resp = not (r is None or pc.phase(self, r) >= phases.secret)
788 788 result.append(resp)
789 789 return result
790 790
791 791 def local(self):
792 792 return self
793 793
794 794 def cancopy(self):
795 795 return self.local() # so statichttprepo's override of local() works
796 796
797 797 def join(self, f):
798 798 return os.path.join(self.path, f)
799 799
800 800 def wjoin(self, f):
801 801 return os.path.join(self.root, f)
802 802
803 803 def file(self, f):
804 804 if f[0] == '/':
805 805 f = f[1:]
806 806 return filelog.filelog(self.sopener, f)
807 807
808 808 def changectx(self, changeid):
809 809 return self[changeid]
810 810
811 811 def parents(self, changeid=None):
812 812 '''get list of changectxs for parents of changeid'''
813 813 return self[changeid].parents()
814 814
815 815 def setparents(self, p1, p2=nullid):
816 816 copies = self.dirstate.setparents(p1, p2)
817 817 if copies:
818 818 # Adjust copy records, the dirstate cannot do it, it
819 819 # requires access to parents manifests. Preserve them
820 820 # only for entries added to first parent.
821 821 pctx = self[p1]
822 822 for f in copies:
823 823 if f not in pctx and copies[f] in pctx:
824 824 self.dirstate.copy(copies[f], f)
825 825
826 826 def filectx(self, path, changeid=None, fileid=None):
827 827 """changeid can be a changeset revision, node, or tag.
828 828 fileid can be a file revision or node."""
829 829 return context.filectx(self, path, changeid, fileid)
830 830
831 831 def getcwd(self):
832 832 return self.dirstate.getcwd()
833 833
834 834 def pathto(self, f, cwd=None):
835 835 return self.dirstate.pathto(f, cwd)
836 836
837 837 def wfile(self, f, mode='r'):
838 838 return self.wopener(f, mode)
839 839
840 840 def _link(self, f):
841 841 return os.path.islink(self.wjoin(f))
842 842
843 843 def _loadfilter(self, filter):
844 844 if filter not in self.filterpats:
845 845 l = []
846 846 for pat, cmd in self.ui.configitems(filter):
847 847 if cmd == '!':
848 848 continue
849 849 mf = matchmod.match(self.root, '', [pat])
850 850 fn = None
851 851 params = cmd
852 852 for name, filterfn in self._datafilters.iteritems():
853 853 if cmd.startswith(name):
854 854 fn = filterfn
855 855 params = cmd[len(name):].lstrip()
856 856 break
857 857 if not fn:
858 858 fn = lambda s, c, **kwargs: util.filter(s, c)
859 859 # Wrap old filters not supporting keyword arguments
860 860 if not inspect.getargspec(fn)[2]:
861 861 oldfn = fn
862 862 fn = lambda s, c, **kwargs: oldfn(s, c)
863 863 l.append((mf, fn, params))
864 864 self.filterpats[filter] = l
865 865 return self.filterpats[filter]
866 866
867 867 def _filter(self, filterpats, filename, data):
868 868 for mf, fn, cmd in filterpats:
869 869 if mf(filename):
870 870 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
871 871 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
872 872 break
873 873
874 874 return data
875 875
876 876 @propertycache
877 877 def _encodefilterpats(self):
878 878 return self._loadfilter('encode')
879 879
880 880 @propertycache
881 881 def _decodefilterpats(self):
882 882 return self._loadfilter('decode')
883 883
884 884 def adddatafilter(self, name, filter):
885 885 self._datafilters[name] = filter
886 886
887 887 def wread(self, filename):
888 888 if self._link(filename):
889 889 data = os.readlink(self.wjoin(filename))
890 890 else:
891 891 data = self.wopener.read(filename)
892 892 return self._filter(self._encodefilterpats, filename, data)
893 893
894 894 def wwrite(self, filename, data, flags):
895 895 data = self._filter(self._decodefilterpats, filename, data)
896 896 if 'l' in flags:
897 897 self.wopener.symlink(data, filename)
898 898 else:
899 899 self.wopener.write(filename, data)
900 900 if 'x' in flags:
901 901 util.setflags(self.wjoin(filename), False, True)
902 902
903 903 def wwritedata(self, filename, data):
904 904 return self._filter(self._decodefilterpats, filename, data)
905 905
906 906 def transaction(self, desc):
907 907 tr = self._transref and self._transref() or None
908 908 if tr and tr.running():
909 909 return tr.nest()
910 910
911 911 # abort here if the journal already exists
912 912 if os.path.exists(self.sjoin("journal")):
913 913 raise error.RepoError(
914 914 _("abandoned transaction found - run hg recover"))
915 915
916 916 self._writejournal(desc)
917 917 renames = [(x, undoname(x)) for x in self._journalfiles()]
918 918
919 919 tr = transaction.transaction(self.ui.warn, self.sopener,
920 920 self.sjoin("journal"),
921 921 aftertrans(renames),
922 922 self.store.createmode)
923 923 self._transref = weakref.ref(tr)
924 924 return tr
925 925
926 926 def _journalfiles(self):
927 927 return (self.sjoin('journal'), self.join('journal.dirstate'),
928 928 self.join('journal.branch'), self.join('journal.desc'),
929 929 self.join('journal.bookmarks'),
930 930 self.sjoin('journal.phaseroots'))
931 931
932 932 def undofiles(self):
933 933 return [undoname(x) for x in self._journalfiles()]
934 934
935 935 def _writejournal(self, desc):
936 936 self.opener.write("journal.dirstate",
937 937 self.opener.tryread("dirstate"))
938 938 self.opener.write("journal.branch",
939 939 encoding.fromlocal(self.dirstate.branch()))
940 940 self.opener.write("journal.desc",
941 941 "%d\n%s\n" % (len(self), desc))
942 942 self.opener.write("journal.bookmarks",
943 943 self.opener.tryread("bookmarks"))
944 944 self.sopener.write("journal.phaseroots",
945 945 self.sopener.tryread("phaseroots"))
946 946
947 947 def recover(self):
948 948 lock = self.lock()
949 949 try:
950 950 if os.path.exists(self.sjoin("journal")):
951 951 self.ui.status(_("rolling back interrupted transaction\n"))
952 952 transaction.rollback(self.sopener, self.sjoin("journal"),
953 953 self.ui.warn)
954 954 self.invalidate()
955 955 return True
956 956 else:
957 957 self.ui.warn(_("no interrupted transaction available\n"))
958 958 return False
959 959 finally:
960 960 lock.release()
961 961
962 962 def rollback(self, dryrun=False, force=False):
963 963 wlock = lock = None
964 964 try:
965 965 wlock = self.wlock()
966 966 lock = self.lock()
967 967 if os.path.exists(self.sjoin("undo")):
968 968 return self._rollback(dryrun, force)
969 969 else:
970 970 self.ui.warn(_("no rollback information available\n"))
971 971 return 1
972 972 finally:
973 973 release(lock, wlock)
974 974
975 975 def _rollback(self, dryrun, force):
976 976 ui = self.ui
977 977 try:
978 978 args = self.opener.read('undo.desc').splitlines()
979 979 (oldlen, desc, detail) = (int(args[0]), args[1], None)
980 980 if len(args) >= 3:
981 981 detail = args[2]
982 982 oldtip = oldlen - 1
983 983
984 984 if detail and ui.verbose:
985 985 msg = (_('repository tip rolled back to revision %s'
986 986 ' (undo %s: %s)\n')
987 987 % (oldtip, desc, detail))
988 988 else:
989 989 msg = (_('repository tip rolled back to revision %s'
990 990 ' (undo %s)\n')
991 991 % (oldtip, desc))
992 992 except IOError:
993 993 msg = _('rolling back unknown transaction\n')
994 994 desc = None
995 995
996 996 if not force and self['.'] != self['tip'] and desc == 'commit':
997 997 raise util.Abort(
998 998 _('rollback of last commit while not checked out '
999 999 'may lose data'), hint=_('use -f to force'))
1000 1000
1001 1001 ui.status(msg)
1002 1002 if dryrun:
1003 1003 return 0
1004 1004
1005 1005 parents = self.dirstate.parents()
1006 1006 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1007 1007 if os.path.exists(self.join('undo.bookmarks')):
1008 1008 util.rename(self.join('undo.bookmarks'),
1009 1009 self.join('bookmarks'))
1010 1010 if os.path.exists(self.sjoin('undo.phaseroots')):
1011 1011 util.rename(self.sjoin('undo.phaseroots'),
1012 1012 self.sjoin('phaseroots'))
1013 1013 self.invalidate()
1014 1014
1015 1015 # Discard all cache entries to force reloading everything.
1016 1016 self._filecache.clear()
1017 1017
1018 1018 parentgone = (parents[0] not in self.changelog.nodemap or
1019 1019 parents[1] not in self.changelog.nodemap)
1020 1020 if parentgone:
1021 1021 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1022 1022 try:
1023 1023 branch = self.opener.read('undo.branch')
1024 1024 self.dirstate.setbranch(encoding.tolocal(branch))
1025 1025 except IOError:
1026 1026 ui.warn(_('named branch could not be reset: '
1027 1027 'current branch is still \'%s\'\n')
1028 1028 % self.dirstate.branch())
1029 1029
1030 1030 self.dirstate.invalidate()
1031 1031 parents = tuple([p.rev() for p in self.parents()])
1032 1032 if len(parents) > 1:
1033 1033 ui.status(_('working directory now based on '
1034 1034 'revisions %d and %d\n') % parents)
1035 1035 else:
1036 1036 ui.status(_('working directory now based on '
1037 1037 'revision %d\n') % parents)
1038 1038 # TODO: if we know which new heads may result from this rollback, pass
1039 1039 # them to destroyed(), which will prevent the branchhead cache from being
1040 1040 # invalidated.
1041 1041 self.destroyed()
1042 1042 return 0
1043 1043
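    # Sketch: rollback(dryrun=True) only reports what would happen and
    # returns 0 without touching the undo files, so callers can preview
    # the effect before undoing for real (assuming `repo` has undo data):
    #
    #   if repo.rollback(dryrun=True) == 0:
    #       repo.rollback(force=True)  # actually undo the last transaction
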
1044 1044 def invalidatecaches(self):
1045 1045 def delcache(name):
1046 1046 try:
1047 1047 delattr(self, name)
1048 1048 except AttributeError:
1049 1049 pass
1050 1050
1051 1051 delcache('_tagscache')
1052 1052
1053 1053 self._branchcache = None # in UTF-8
1054 1054 self._branchcachetip = None
1055 1055 obsolete.clearobscaches(self)
1056 1056
1057 1057 def invalidatedirstate(self):
1058 1058 '''Invalidates the dirstate, causing the next call to dirstate
1059 1059 to check if it was modified since the last time it was read,
1060 1060 rereading it if it has.
1061 1061
1062 1062 This differs from dirstate.invalidate() in that it does not
1063 1063 always reread the dirstate. Use dirstate.invalidate() if you want
1064 1064 to explicitly reread the dirstate (i.e. to restore it to a
1065 1065 previously known good state).'''
1066 1066 if 'dirstate' in self.__dict__:
1067 1067 for k in self.dirstate._filecache:
1068 1068 try:
1069 1069 delattr(self.dirstate, k)
1070 1070 except AttributeError:
1071 1071 pass
1072 1072 delattr(self, 'dirstate')
1073 1073
1074 1074 def invalidate(self):
1075 1075 for k in self._filecache:
1076 1076 # dirstate is invalidated separately in invalidatedirstate()
1077 1077 if k == 'dirstate':
1078 1078 continue
1079 1079
1080 1080 try:
1081 1081 delattr(self, k)
1082 1082 except AttributeError:
1083 1083 pass
1084 1084 self.invalidatecaches()
1085 1085
1086 1086 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1087 1087 try:
1088 1088 l = lock.lock(lockname, 0, releasefn, desc=desc)
1089 1089 except error.LockHeld, inst:
1090 1090 if not wait:
1091 1091 raise
1092 1092 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1093 1093 (desc, inst.locker))
1094 1094 # default to 600 seconds timeout
1095 1095 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1096 1096 releasefn, desc=desc)
1097 1097 if acquirefn:
1098 1098 acquirefn()
1099 1099 return l
1100 1100
1101 1101 def _afterlock(self, callback):
1102 1102 """add a callback to the current repository lock.
1103 1103
1104 1104 The callback will be executed on lock release."""
1105 1105 l = self._lockref and self._lockref()
1106 1106 if l:
1107 1107 l.postrelease.append(callback)
1108 1108 else:
1109 1109 callback()
1110 1110
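    # Sketch of _afterlock(): the callback is deferred until the current
    # store lock is released, or runs immediately when no lock is held
    # (assuming `repo` is a localrepository):
    #
    #   def notify():
    #       repo.ui.status("post-transaction work done\n")
    #   repo._afterlock(notify)
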
1111 1111 def lock(self, wait=True):
1112 1112 '''Lock the repository store (.hg/store) and return a weak reference
1113 1113 to the lock. Use this before modifying the store (e.g. committing or
1114 1114 stripping). If you are opening a transaction, get a lock as well.'''
1115 1115 l = self._lockref and self._lockref()
1116 1116 if l is not None and l.held:
1117 1117 l.lock()
1118 1118 return l
1119 1119
1120 1120 def unlock():
1121 1121 self.store.write()
1122 1122 if '_phasecache' in vars(self):
1123 1123 self._phasecache.write()
1124 1124 for k, ce in self._filecache.items():
1125 1125 if k == 'dirstate':
1126 1126 continue
1127 1127 ce.refresh()
1128 1128
1129 1129 l = self._lock(self.sjoin("lock"), wait, unlock,
1130 1130 self.invalidate, _('repository %s') % self.origroot)
1131 1131 self._lockref = weakref.ref(l)
1132 1132 return l
1133 1133
1134 1134 def wlock(self, wait=True):
1135 1135 '''Lock the non-store parts of the repository (everything under
1136 1136 .hg except .hg/store) and return a weak reference to the lock.
1137 1137 Use this before modifying files in .hg.'''
1138 1138 l = self._wlockref and self._wlockref()
1139 1139 if l is not None and l.held:
1140 1140 l.lock()
1141 1141 return l
1142 1142
1143 1143 def unlock():
1144 1144 self.dirstate.write()
1145 1145 ce = self._filecache.get('dirstate')
1146 1146 if ce:
1147 1147 ce.refresh()
1148 1148
1149 1149 l = self._lock(self.join("wlock"), wait, unlock,
1150 1150 self.invalidatedirstate, _('working directory of %s') %
1151 1151 self.origroot)
1152 1152 self._wlockref = weakref.ref(l)
1153 1153 return l
1154 1154
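    # Lock-ordering sketch: when both locks are needed, acquire wlock()
    # before lock() (as rollback() above does) and release in reverse
    # order to avoid deadlocks:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       pass  # modify the store and working-dir state here
    #   finally:
    #       release(lock, wlock)  # `release` is imported at module top
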
1155 1155 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1156 1156 """
1157 1157 commit an individual file as part of a larger transaction
1158 1158 """
1159 1159
1160 1160 fname = fctx.path()
1161 1161 text = fctx.data()
1162 1162 flog = self.file(fname)
1163 1163 fparent1 = manifest1.get(fname, nullid)
1164 1164 fparent2 = fparent2o = manifest2.get(fname, nullid)
1165 1165
1166 1166 meta = {}
1167 1167 copy = fctx.renamed()
1168 1168 if copy and copy[0] != fname:
1169 1169 # Mark the new revision of this file as a copy of another
1170 1170 # file. This copy data will effectively act as a parent
1171 1171 # of this new revision. If this is a merge, the first
1172 1172 # parent will be the nullid (meaning "look up the copy data")
1173 1173 # and the second one will be the other parent. For example:
1174 1174 #
1175 1175 # 0 --- 1 --- 3 rev1 changes file foo
1176 1176 # \ / rev2 renames foo to bar and changes it
1177 1177 # \- 2 -/ rev3 should have bar with all changes and
1178 1178 # should record that bar descends from
1179 1179 # bar in rev2 and foo in rev1
1180 1180 #
1181 1181 # this allows this merge to succeed:
1182 1182 #
1183 1183 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1184 1184 # \ / merging rev3 and rev4 should use bar@rev2
1185 1185 # \- 2 --- 4 as the merge base
1186 1186 #
1187 1187
1188 1188 cfname = copy[0]
1189 1189 crev = manifest1.get(cfname)
1190 1190 newfparent = fparent2
1191 1191
1192 1192 if manifest2: # branch merge
1193 1193 if fparent2 == nullid or crev is None: # copied on remote side
1194 1194 if cfname in manifest2:
1195 1195 crev = manifest2[cfname]
1196 1196 newfparent = fparent1
1197 1197
1198 1198 # find source in nearest ancestor if we've lost track
1199 1199 if not crev:
1200 1200 self.ui.debug(" %s: searching for copy revision for %s\n" %
1201 1201 (fname, cfname))
1202 1202 for ancestor in self[None].ancestors():
1203 1203 if cfname in ancestor:
1204 1204 crev = ancestor[cfname].filenode()
1205 1205 break
1206 1206
1207 1207 if crev:
1208 1208 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1209 1209 meta["copy"] = cfname
1210 1210 meta["copyrev"] = hex(crev)
1211 1211 fparent1, fparent2 = nullid, newfparent
1212 1212 else:
1213 1213 self.ui.warn(_("warning: can't find ancestor for '%s' "
1214 1214 "copied from '%s'!\n") % (fname, cfname))
1215 1215
1216 1216 elif fparent2 != nullid:
1217 1217 # is one parent an ancestor of the other?
1218 1218 fparentancestor = flog.ancestor(fparent1, fparent2)
1219 1219 if fparentancestor == fparent1:
1220 1220 fparent1, fparent2 = fparent2, nullid
1221 1221 elif fparentancestor == fparent2:
1222 1222 fparent2 = nullid
1223 1223
1224 1224 # is the file changed?
1225 1225 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1226 1226 changelist.append(fname)
1227 1227 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1228 1228
1229 1229 # are just the flags changed during merge?
1230 1230 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1231 1231 changelist.append(fname)
1232 1232
1233 1233 return fparent1
1234 1234
1235 1235 def commit(self, text="", user=None, date=None, match=None, force=False,
1236 1236 editor=False, extra={}):
1237 1237 """Add a new revision to current repository.
1238 1238
1239 1239 Revision information is gathered from the working directory,
1240 1240 match can be used to filter the committed files. If editor is
1241 1241 supplied, it is called to get a commit message.
1242 1242 """
1243 1243
1244 1244 def fail(f, msg):
1245 1245 raise util.Abort('%s: %s' % (f, msg))
1246 1246
1247 1247 if not match:
1248 1248 match = matchmod.always(self.root, '')
1249 1249
1250 1250 if not force:
1251 1251 vdirs = []
1252 1252 match.dir = vdirs.append
1253 1253 match.bad = fail
1254 1254
1255 1255 wlock = self.wlock()
1256 1256 try:
1257 1257 wctx = self[None]
1258 1258 merge = len(wctx.parents()) > 1
1259 1259
1260 1260 if (not force and merge and match and
1261 1261 (match.files() or match.anypats())):
1262 1262 raise util.Abort(_('cannot partially commit a merge '
1263 1263 '(do not specify files or patterns)'))
1264 1264
1265 1265 changes = self.status(match=match, clean=force)
1266 1266 if force:
1267 1267 changes[0].extend(changes[6]) # mq may commit unchanged files
1268 1268
1269 1269 # check subrepos
1270 1270 subs = []
1271 1271 commitsubs = set()
1272 1272 newstate = wctx.substate.copy()
1273 1273 # only manage subrepos and .hgsubstate if .hgsub is present
1274 1274 if '.hgsub' in wctx:
1275 1275 # we'll decide whether to track this ourselves, thanks
1276 1276 if '.hgsubstate' in changes[0]:
1277 1277 changes[0].remove('.hgsubstate')
1278 1278 if '.hgsubstate' in changes[2]:
1279 1279 changes[2].remove('.hgsubstate')
1280 1280
1281 1281 # compare current state to last committed state
1282 1282 # build new substate based on last committed state
1283 1283 oldstate = wctx.p1().substate
1284 1284 for s in sorted(newstate.keys()):
1285 1285 if not match(s):
1286 1286 # ignore working copy, use old state if present
1287 1287 if s in oldstate:
1288 1288 newstate[s] = oldstate[s]
1289 1289 continue
1290 1290 if not force:
1291 1291 raise util.Abort(
1292 1292 _("commit with new subrepo %s excluded") % s)
1293 1293 if wctx.sub(s).dirty(True):
1294 1294 if not self.ui.configbool('ui', 'commitsubrepos'):
1295 1295 raise util.Abort(
1296 1296 _("uncommitted changes in subrepo %s") % s,
1297 1297 hint=_("use --subrepos for recursive commit"))
1298 1298 subs.append(s)
1299 1299 commitsubs.add(s)
1300 1300 else:
1301 1301 bs = wctx.sub(s).basestate()
1302 1302 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1303 1303 if oldstate.get(s, (None, None, None))[1] != bs:
1304 1304 subs.append(s)
1305 1305
1306 1306 # check for removed subrepos
1307 1307 for p in wctx.parents():
1308 1308 r = [s for s in p.substate if s not in newstate]
1309 1309 subs += [s for s in r if match(s)]
1310 1310 if subs:
1311 1311 if (not match('.hgsub') and
1312 1312 '.hgsub' in (wctx.modified() + wctx.added())):
1313 1313 raise util.Abort(
1314 1314 _("can't commit subrepos without .hgsub"))
1315 1315 changes[0].insert(0, '.hgsubstate')
1316 1316
1317 1317 elif '.hgsub' in changes[2]:
1318 1318 # clean up .hgsubstate when .hgsub is removed
1319 1319 if ('.hgsubstate' in wctx and
1320 1320 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1321 1321 changes[2].insert(0, '.hgsubstate')
1322 1322
1323 1323 # make sure all explicit patterns are matched
1324 1324 if not force and match.files():
1325 1325 matched = set(changes[0] + changes[1] + changes[2])
1326 1326
1327 1327 for f in match.files():
1328 1328 f = self.dirstate.normalize(f)
1329 1329 if f == '.' or f in matched or f in wctx.substate:
1330 1330 continue
1331 1331 if f in changes[3]: # missing
1332 1332 fail(f, _('file not found!'))
1333 1333 if f in vdirs: # visited directory
1334 1334 d = f + '/'
1335 1335 for mf in matched:
1336 1336 if mf.startswith(d):
1337 1337 break
1338 1338 else:
1339 1339 fail(f, _("no match under directory!"))
1340 1340 elif f not in self.dirstate:
1341 1341 fail(f, _("file not tracked!"))
1342 1342
1343 1343 if (not force and not extra.get("close") and not merge
1344 1344 and not (changes[0] or changes[1] or changes[2])
1345 1345 and wctx.branch() == wctx.p1().branch()):
1346 1346 return None
1347 1347
1348 1348 if merge and changes[3]:
1349 1349 raise util.Abort(_("cannot commit merge with missing files"))
1350 1350
1351 1351 ms = mergemod.mergestate(self)
1352 1352 for f in changes[0]:
1353 1353 if f in ms and ms[f] == 'u':
1354 1354 raise util.Abort(_("unresolved merge conflicts "
1355 1355 "(see hg help resolve)"))
1356 1356
1357 1357 cctx = context.workingctx(self, text, user, date, extra, changes)
1358 1358 if editor:
1359 1359 cctx._text = editor(self, cctx, subs)
1360 1360 edited = (text != cctx._text)
1361 1361
1362 1362 # commit subs and write new state
1363 1363 if subs:
1364 1364 for s in sorted(commitsubs):
1365 1365 sub = wctx.sub(s)
1366 1366 self.ui.status(_('committing subrepository %s\n') %
1367 1367 subrepo.subrelpath(sub))
1368 1368 sr = sub.commit(cctx._text, user, date)
1369 1369 newstate[s] = (newstate[s][0], sr)
1370 1370 subrepo.writestate(self, newstate)
1371 1371
1372 1372 # Save commit message in case this transaction gets rolled back
1373 1373 # (e.g. by a pretxncommit hook). Leave the content alone on
1374 1374 # the assumption that the user will use the same editor again.
1375 1375 msgfn = self.savecommitmessage(cctx._text)
1376 1376
1377 1377 p1, p2 = self.dirstate.parents()
1378 1378 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1379 1379 try:
1380 1380 self.hook("precommit", throw=True, parent1=hookp1,
1381 1381 parent2=hookp2)
1382 1382 ret = self.commitctx(cctx, True)
1383 1383 except: # re-raises
1384 1384 if edited:
1385 1385 self.ui.write(
1386 1386 _('note: commit message saved in %s\n') % msgfn)
1387 1387 raise
1388 1388
1389 1389 # update bookmarks, dirstate and mergestate
1390 1390 bookmarks.update(self, [p1, p2], ret)
1391 1391 for f in changes[0] + changes[1]:
1392 1392 self.dirstate.normal(f)
1393 1393 for f in changes[2]:
1394 1394 self.dirstate.drop(f)
1395 1395 self.dirstate.setparents(ret)
1396 1396 ms.reset()
1397 1397 finally:
1398 1398 wlock.release()
1399 1399
1400 1400 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1401 1401 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1402 1402 self._afterlock(commithook)
1403 1403 return ret
1404 1404
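    # Usage sketch for commit(): a match object (here built with exact
    # file names; `matchmod` is the mercurial.match module used above)
    # restricts which files get committed:
    #
    #   m = matchmod.match(repo.root, '', ['foo.txt'], exact=True)
    #   node = repo.commit(text="fix foo", user="me <me@example.com>",
    #                      match=m)
    #   if node is None:
    #       pass  # nothing changed, no revision was created
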
1405 1405 def commitctx(self, ctx, error=False):
1406 1406 """Add a new revision to current repository.
1407 1407 Revision information is passed via the context argument.
1408 1408 """
1409 1409
1410 1410 tr = lock = None
1411 1411 removed = list(ctx.removed())
1412 1412 p1, p2 = ctx.p1(), ctx.p2()
1413 1413 user = ctx.user()
1414 1414
1415 1415 lock = self.lock()
1416 1416 try:
1417 1417 tr = self.transaction("commit")
1418 1418 trp = weakref.proxy(tr)
1419 1419
1420 1420 if ctx.files():
1421 1421 m1 = p1.manifest().copy()
1422 1422 m2 = p2.manifest()
1423 1423
1424 1424 # check in files
1425 1425 new = {}
1426 1426 changed = []
1427 1427 linkrev = len(self)
1428 1428 for f in sorted(ctx.modified() + ctx.added()):
1429 1429 self.ui.note(f + "\n")
1430 1430 try:
1431 1431 fctx = ctx[f]
1432 1432 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1433 1433 changed)
1434 1434 m1.set(f, fctx.flags())
1435 1435 except OSError, inst:
1436 1436 self.ui.warn(_("trouble committing %s!\n") % f)
1437 1437 raise
1438 1438 except IOError, inst:
1439 1439 errcode = getattr(inst, 'errno', errno.ENOENT)
1440 1440 if error or errcode and errcode != errno.ENOENT:
1441 1441 self.ui.warn(_("trouble committing %s!\n") % f)
1442 1442 raise
1443 1443 else:
1444 1444 removed.append(f)
1445 1445
1446 1446 # update manifest
1447 1447 m1.update(new)
1448 1448 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1449 1449 drop = [f for f in removed if f in m1]
1450 1450 for f in drop:
1451 1451 del m1[f]
1452 1452 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1453 1453 p2.manifestnode(), (new, drop))
1454 1454 files = changed + removed
1455 1455 else:
1456 1456 mn = p1.manifestnode()
1457 1457 files = []
1458 1458
1459 1459 # update changelog
1460 1460 self.changelog.delayupdate()
1461 1461 n = self.changelog.add(mn, files, ctx.description(),
1462 1462 trp, p1.node(), p2.node(),
1463 1463 user, ctx.date(), ctx.extra().copy())
1464 1464 p = lambda: self.changelog.writepending() and self.root or ""
1465 1465 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1466 1466 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1467 1467 parent2=xp2, pending=p)
1468 1468 self.changelog.finalize(trp)
1469 1469 # set the new commit in its proper phase
1470 1470 targetphase = phases.newcommitphase(self.ui)
1471 1471 if targetphase:
1472 1472 # retracting the boundary does not alter parent changesets.
1473 1473 # if a parent has a higher phase, the resulting phase will
1474 1474 # be compliant anyway
1475 1475 #
1476 1476 # if minimal phase was 0 we don't need to retract anything
1477 1477 phases.retractboundary(self, targetphase, [n])
1478 1478 tr.close()
1479 1479 self.updatebranchcache()
1480 1480 return n
1481 1481 finally:
1482 1482 if tr:
1483 1483 tr.release()
1484 1484 lock.release()
1485 1485
1486 1486 def destroyed(self, newheadnodes=None):
1487 1487 '''Inform the repository that nodes have been destroyed.
1488 1488 Intended for use by strip and rollback, so there's a common
1489 1489 place for anything that has to be done after destroying history.
1490 1490
1491 1491 If you know the branchhead cache was up to date before nodes were removed
1492 1492 and you also know the set of candidate new heads that may have resulted
1493 1493 from the destruction, you can set newheadnodes. This will enable the
1494 1494 code to update the branchheads cache, rather than having future code
1495 1495 decide it's invalid and regenerating it from scratch.
1496 1496 '''
1497 1497 # If we have info (newheadnodes) on how to update the branch cache, do
1498 1498 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1499 1499 # will be caught the next time it is read.
1500 1500 if newheadnodes:
1501 1501 tiprev = len(self) - 1
1502 1502 ctxgen = (self[node] for node in newheadnodes
1503 1503 if self.changelog.hasnode(node))
1504 1504 self._updatebranchcache(self._branchcache, ctxgen)
1505 1505 self._writebranchcache(self._branchcache, self.changelog.tip(),
1506 1506 tiprev)
1507 1507
1508 1508 # Ensure the persistent tag cache is updated. Doing it now
1509 1509 # means that the tag cache only has to worry about destroyed
1510 1510 # heads immediately after a strip/rollback. That in turn
1511 1511 # guarantees that "cachetip == currenttip" (comparing both rev
1512 1512 # and node) always means no nodes have been added or destroyed.
1513 1513
1514 1514 # XXX this is suboptimal when qrefresh'ing: we strip the current
1515 1515 # head, refresh the tag cache, then immediately add a new head.
1516 1516 # But I think doing it this way is necessary for the "instant
1517 1517 # tag cache retrieval" case to work.
1518 1518 self.invalidatecaches()
1519 1519
1520 1520 # Discard all cache entries to force reloading everything.
1521 1521 self._filecache.clear()
1522 1522
1523 1523 def walk(self, match, node=None):
1524 1524 '''
1525 1525 walk recursively through the directory tree or a given
1526 1526 changeset, finding all files matched by the match
1527 1527 function
1528 1528 '''
1529 1529 return self[node].walk(match)
1530 1530
1531 1531 def status(self, node1='.', node2=None, match=None,
1532 1532 ignored=False, clean=False, unknown=False,
1533 1533 listsubrepos=False):
1534 1534 """return status of files between two nodes or node and working
1535 1535 directory.
1536 1536
1537 1537 If node1 is None, use the first dirstate parent instead.
1538 1538 If node2 is None, compare node1 with working directory.
1539 1539 """
1540 1540
1541 1541 def mfmatches(ctx):
1542 1542 mf = ctx.manifest().copy()
1543 1543 if match.always():
1544 1544 return mf
1545 1545 for fn in mf.keys():
1546 1546 if not match(fn):
1547 1547 del mf[fn]
1548 1548 return mf
1549 1549
1550 1550 if isinstance(node1, context.changectx):
1551 1551 ctx1 = node1
1552 1552 else:
1553 1553 ctx1 = self[node1]
1554 1554 if isinstance(node2, context.changectx):
1555 1555 ctx2 = node2
1556 1556 else:
1557 1557 ctx2 = self[node2]
1558 1558
1559 1559 working = ctx2.rev() is None
1560 1560 parentworking = working and ctx1 == self['.']
1561 1561 match = match or matchmod.always(self.root, self.getcwd())
1562 1562 listignored, listclean, listunknown = ignored, clean, unknown
1563 1563
1564 1564 # load earliest manifest first for caching reasons
1565 1565 if not working and ctx2.rev() < ctx1.rev():
1566 1566 ctx2.manifest()
1567 1567
1568 1568 if not parentworking:
1569 1569 def bad(f, msg):
1570 1570 # 'f' may be a directory pattern from 'match.files()',
1571 1571 # so 'f not in ctx1' is not enough
1572 1572 if f not in ctx1 and f not in ctx1.dirs():
1573 1573 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1574 1574 match.bad = bad
1575 1575
1576 1576 if working: # we need to scan the working dir
1577 1577 subrepos = []
1578 1578 if '.hgsub' in self.dirstate:
1579 1579 subrepos = ctx2.substate.keys()
1580 1580 s = self.dirstate.status(match, subrepos, listignored,
1581 1581 listclean, listunknown)
1582 1582 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1583 1583
1584 1584 # check for any possibly clean files
1585 1585 if parentworking and cmp:
1586 1586 fixup = []
1587 1587 # do a full compare of any files that might have changed
1588 1588 for f in sorted(cmp):
1589 1589 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1590 1590 or ctx1[f].cmp(ctx2[f])):
1591 1591 modified.append(f)
1592 1592 else:
1593 1593 fixup.append(f)
1594 1594
1595 1595 # update dirstate for files that are actually clean
1596 1596 if fixup:
1597 1597 if listclean:
1598 1598 clean += fixup
1599 1599
1600 1600 try:
1601 1601 # updating the dirstate is optional
1602 1602 # so we don't wait on the lock
1603 1603 wlock = self.wlock(False)
1604 1604 try:
1605 1605 for f in fixup:
1606 1606 self.dirstate.normal(f)
1607 1607 finally:
1608 1608 wlock.release()
1609 1609 except error.LockError:
1610 1610 pass
1611 1611
1612 1612 if not parentworking:
1613 1613 mf1 = mfmatches(ctx1)
1614 1614 if working:
1615 1615 # we are comparing working dir against non-parent
1616 1616 # generate a pseudo-manifest for the working dir
1617 1617 mf2 = mfmatches(self['.'])
1618 1618 for f in cmp + modified + added:
1619 1619 mf2[f] = None
1620 1620 mf2.set(f, ctx2.flags(f))
1621 1621 for f in removed:
1622 1622 if f in mf2:
1623 1623 del mf2[f]
1624 1624 else:
1625 1625 # we are comparing two revisions
1626 1626 deleted, unknown, ignored = [], [], []
1627 1627 mf2 = mfmatches(ctx2)
1628 1628
1629 1629 modified, added, clean = [], [], []
1630 1630 withflags = mf1.withflags() | mf2.withflags()
1631 1631 for fn in mf2:
1632 1632 if fn in mf1:
1633 1633 if (fn not in deleted and
1634 1634 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1635 1635 (mf1[fn] != mf2[fn] and
1636 1636 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1637 1637 modified.append(fn)
1638 1638 elif listclean:
1639 1639 clean.append(fn)
1640 1640 del mf1[fn]
1641 1641 elif fn not in deleted:
1642 1642 added.append(fn)
1643 1643 removed = mf1.keys()
1644 1644
1645 1645 if working and modified and not self.dirstate._checklink:
1646 1646 # Symlink placeholders may get non-symlink-like contents
1647 1647 # via user error or dereferencing by NFS or Samba servers,
1648 1648 # so we filter out any placeholders that don't look like a
1649 1649 # symlink
1650 1650 sane = []
1651 1651 for f in modified:
1652 1652 if ctx2.flags(f) == 'l':
1653 1653 d = ctx2[f].data()
1654 1654 if len(d) >= 1024 or '\n' in d or util.binary(d):
1655 1655 self.ui.debug('ignoring suspect symlink placeholder'
1656 1656 ' "%s"\n' % f)
1657 1657 continue
1658 1658 sane.append(f)
1659 1659 modified = sane
1660 1660
1661 1661 r = modified, added, removed, deleted, unknown, ignored, clean
1662 1662
1663 1663 if listsubrepos:
1664 1664 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1665 1665 if working:
1666 1666 rev2 = None
1667 1667 else:
1668 1668 rev2 = ctx2.substate[subpath][1]
1669 1669 try:
1670 1670 submatch = matchmod.narrowmatcher(subpath, match)
1671 1671 s = sub.status(rev2, match=submatch, ignored=listignored,
1672 1672 clean=listclean, unknown=listunknown,
1673 1673 listsubrepos=True)
1674 1674 for rfiles, sfiles in zip(r, s):
1675 1675 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1676 1676 except error.LookupError:
1677 1677 self.ui.status(_("skipping missing subrepository: %s\n")
1678 1678 % subpath)
1679 1679
1680 1680 for l in r:
1681 1681 l.sort()
1682 1682 return r
1683 1683
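    # Sketch: status() returns a 7-tuple of sorted file lists; the
    # ignored, clean and unknown lists stay empty unless requested:
    #
    #   st = repo.status(ignored=True, clean=True, unknown=True)
    #   modified, added, removed, deleted, unknown, ignored, clean = st
    #   for f in modified:
    #       repo.ui.status("M %s\n" % f)
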
1684 1684 def heads(self, start=None):
1685 1685 heads = self.changelog.heads(start)
1686 1686 # sort the output in rev descending order
1687 1687 return sorted(heads, key=self.changelog.rev, reverse=True)
1688 1688
1689 1689 def branchheads(self, branch=None, start=None, closed=False):
1690 1690 '''return a (possibly filtered) list of heads for the given branch
1691 1691
1692 1692 Heads are returned in topological order, from newest to oldest.
1693 1693 If branch is None, use the dirstate branch.
1694 1694 If start is not None, return only heads reachable from start.
1695 1695 If closed is True, return heads that are marked as closed as well.
1696 1696 '''
1697 1697 if branch is None:
1698 1698 branch = self[None].branch()
1699 1699 branches = self.branchmap()
1700 1700 if branch not in branches:
1701 1701 return []
1702 1702 # the cache returns heads ordered lowest to highest
1703 1703 bheads = list(reversed(branches[branch]))
1704 1704 if start is not None:
1705 1705 # filter out the heads that cannot be reached from startrev
1706 1706 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1707 1707 bheads = [h for h in bheads if h in fbheads]
1708 1708 if not closed:
1709 1709 bheads = [h for h in bheads if not self[h].closesbranch()]
1710 1710 return bheads
1711 1711
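    # Sketch: branchheads() answers "what are the heads of branch X",
    # newest first; with no argument it uses the dirstate branch:
    #
    #   for h in repo.branchheads('default', closed=False):
    #       repo.ui.status("%s\n" % hex(h))  # hex() from mercurial.node
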
1712 1712 def branches(self, nodes):
1713 1713 if not nodes:
1714 1714 nodes = [self.changelog.tip()]
1715 1715 b = []
1716 1716 for n in nodes:
1717 1717 t = n
1718 1718 while True:
1719 1719 p = self.changelog.parents(n)
1720 1720 if p[1] != nullid or p[0] == nullid:
1721 1721 b.append((t, n, p[0], p[1]))
1722 1722 break
1723 1723 n = p[0]
1724 1724 return b
1725 1725
1726 1726 def between(self, pairs):
1727 1727 r = []
1728 1728
1729 1729 for top, bottom in pairs:
1730 1730 n, l, i = top, [], 0
1731 1731 f = 1
1732 1732
1733 1733 while n != bottom and n != nullid:
1734 1734 p = self.changelog.parents(n)[0]
1735 1735 if i == f:
1736 1736 l.append(n)
1737 1737 f = f * 2
1738 1738 n = p
1739 1739 i += 1
1740 1740
1741 1741 r.append(l)
1742 1742
1743 1743 return r
1744 1744
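    # Worked sketch of between(): walking first parents from `top` towards
    # `bottom`, it keeps the nodes at distances 1, 2, 4, 8, ... from top,
    # giving the old discovery protocol a logarithmic sample of the chain.
    # For a linear history n0 <- n1 <- ... <- n9:
    #
    #   repo.between([(n9, n0)])  # -> [[n8, n7, n5, n1]]
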
1745 1745 def pull(self, remote, heads=None, force=False):
1746 1746 # don't open a transaction for nothing or you break future useful
1747 1747 # rollback calls
1748 1748 tr = None
1749 1749 trname = 'pull\n' + util.hidepassword(remote.url())
1750 1750 lock = self.lock()
1751 1751 try:
1752 1752 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1753 1753 force=force)
1754 1754 common, fetch, rheads = tmp
1755 1755 if not fetch:
1756 1756 self.ui.status(_("no changes found\n"))
1757 1757 added = []
1758 1758 result = 0
1759 1759 else:
1760 1760 tr = self.transaction(trname)
1761 1761 if heads is None and list(common) == [nullid]:
1762 1762 self.ui.status(_("requesting all changes\n"))
1763 1763 elif heads is None and remote.capable('changegroupsubset'):
1764 1764 # issue1320, avoid a race if remote changed after discovery
1765 1765 heads = rheads
1766 1766
1767 1767 if remote.capable('getbundle'):
1768 1768 cg = remote.getbundle('pull', common=common,
1769 1769 heads=heads or rheads)
1770 1770 elif heads is None:
1771 1771 cg = remote.changegroup(fetch, 'pull')
1772 1772 elif not remote.capable('changegroupsubset'):
1773 1773 raise util.Abort(_("partial pull cannot be done because "
1774 1774 "other repository doesn't support "
1775 1775 "changegroupsubset."))
1776 1776 else:
1777 1777 cg = remote.changegroupsubset(fetch, heads, 'pull')
1778 1778 clstart = len(self.changelog)
1779 1779 result = self.addchangegroup(cg, 'pull', remote.url())
1780 1780 clend = len(self.changelog)
1781 1781 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1782 1782
1783 1783 # compute target subset
1784 1784 if heads is None:
1785 1785 # We pulled everything possible
1786 1786 # sync on everything common
1787 1787 subset = common + added
1788 1788 else:
1789 1789 # We pulled a specific subset
1790 1790 # sync on this subset
1791 1791 subset = heads
1792 1792
1793 1793 # Get phases data from the remote
1794 1794 remotephases = remote.listkeys('phases')
1795 1795 publishing = bool(remotephases.get('publishing', False))
1796 1796 if remotephases and not publishing:
1797 1797 # remote is new and non-publishing
1798 1798 pheads, _dr = phases.analyzeremotephases(self, subset,
1799 1799 remotephases)
1800 1800 phases.advanceboundary(self, phases.public, pheads)
1801 1801 phases.advanceboundary(self, phases.draft, subset)
1802 1802 else:
1803 1803 # Remote is old or publishing: all common changesets
1804 1804 # should be seen as public
1805 1805 phases.advanceboundary(self, phases.public, subset)
1806 1806
1807 1807 if obsolete._enabled:
1808 1808 self.ui.debug('fetching remote obsolete markers\n')
1809 1809 remoteobs = remote.listkeys('obsolete')
1810 1810 if 'dump0' in remoteobs:
1811 1811 if tr is None:
1812 1812 tr = self.transaction(trname)
1813 1813 for key in sorted(remoteobs, reverse=True):
1814 1814 if key.startswith('dump'):
1815 1815 data = base85.b85decode(remoteobs[key])
1816 1816 self.obsstore.mergemarkers(tr, data)
1817 1817 if tr is not None:
1818 1818 tr.close()
1819 1819 finally:
1820 1820 if tr is not None:
1821 1821 tr.release()
1822 1822 lock.release()
1823 1823
1824 1824 return result
1825 1825
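    # Usage sketch for pull(): `other` would be a peer from hg.peer() or
    # hg.repository() on the source URL (hypothetical here); the return
    # value comes from addchangegroup(), or 0 when nothing was fetched:
    #
    #   result = repo.pull(other, heads=None, force=False)
    #   if result == 0:
    #       pass  # no changes found
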
1826 1826 def checkpush(self, force, revs):
1827 1827 """Extensions can override this function if additional checks have
1828 1828 to be performed before pushing, or call it if they override push
1829 1829 command.
1830 1830 """
1831 1831 pass
1832 1832
1833 1833 def push(self, remote, force=False, revs=None, newbranch=False):
1834 1834 '''Push outgoing changesets (limited by revs) from the current
1835 1835 repository to remote. Return an integer:
1836 1836 - None means nothing to push
1837 1837 - 0 means HTTP error
1838 1838 - 1 means we pushed and remote head count is unchanged *or*
1839 1839 we have outgoing changesets but refused to push
1840 1840 - other values as described by addchangegroup()
1841 1841 '''
1842 1842 # there are two ways to push to remote repo:
1843 1843 #
1844 1844 # addchangegroup assumes local user can lock remote
1845 1845 # repo (local filesystem, old ssh servers).
1846 1846 #
1847 1847 # unbundle assumes local user cannot lock remote repo (new ssh
1848 1848 # servers, http servers).
1849 1849
1850 1850 if not remote.canpush():
1851 1851 raise util.Abort(_("destination does not support push"))
1852 1852 # get local lock as we might write phase data
1853 1853 locallock = self.lock()
1854 1854 try:
1855 1855 self.checkpush(force, revs)
1856 1856 lock = None
1857 1857 unbundle = remote.capable('unbundle')
1858 1858 if not unbundle:
1859 1859 lock = remote.lock()
1860 1860 try:
1861 1861 # discovery
1862 1862 fci = discovery.findcommonincoming
1863 1863 commoninc = fci(self, remote, force=force)
1864 1864 common, inc, remoteheads = commoninc
1865 1865 fco = discovery.findcommonoutgoing
1866 1866 outgoing = fco(self, remote, onlyheads=revs,
1867 1867 commoninc=commoninc, force=force)
1868 1868
1870 1870 if not outgoing.missing:
1871 1871 # nothing to push
1872 1872 scmutil.nochangesfound(self.ui, self, outgoing.excluded)
1873 1873 ret = None
1874 1874 else:
1875 1875 # something to push
1876 1876 if not force:
1877 1877 # if self.obsstore is empty --> there are no obsolete
1878 1878 # changesets, so we can skip the iteration
1879 1879 if self.obsstore:
1880 1880 # these messages are defined here to stay within the 80-char limit
1881 1881 mso = _("push includes an obsolete changeset: %s!")
1882 1882 msu = _("push includes an unstable changeset: %s!")
1883 1883 # If there is at least one obsolete or unstable
1884 1884 # changeset in missing, then at least one of the
1885 1885 # missing heads will be obsolete or unstable.
1886 1886 # So checking heads only is ok
1887 1887 for node in outgoing.missingheads:
1888 1888 ctx = self[node]
1889 1889 if ctx.obsolete():
1890 1890 raise util.Abort(_(mso) % ctx)
1891 1891 elif ctx.unstable():
1892 1892 raise util.Abort(_(msu) % ctx)
1893 1893 discovery.checkheads(self, remote, outgoing,
1894 1894 remoteheads, newbranch,
1895 1895 bool(inc))
1896 1896
1897 1897 # create a changegroup from local
1898 1898 if revs is None and not outgoing.excluded:
1899 1899 # push everything,
1900 1900 # use the fast path, no race possible on push
1901 1901 cg = self._changegroup(outgoing.missing, 'push')
1902 1902 else:
1903 1903 cg = self.getlocalbundle('push', outgoing)
1904 1904
1905 1905 # apply changegroup to remote
1906 1906 if unbundle:
1907 1907 # local repo finds heads on server, finds out what
1908 1908 # revs it must push. once revs transferred, if server
1909 1909 # finds it has different heads (someone else won
1910 1910 # commit/push race), server aborts.
1911 1911 if force:
1912 1912 remoteheads = ['force']
1913 1913 # ssh: return remote's addchangegroup()
1914 1914 # http: return remote's addchangegroup() or 0 for error
1915 1915 ret = remote.unbundle(cg, remoteheads, 'push')
1916 1916 else:
1917 1917 # we return an integer indicating remote head count
1918 1918 # change
1919 1919 ret = remote.addchangegroup(cg, 'push', self.url())
1920 1920
1921 1921 if ret:
1922 1922 # push succeed, synchronize target of the push
1923 1923 cheads = outgoing.missingheads
1924 1924 elif revs is None:
1925 1925 # Entire push failed: synchronize on all common changesets
1926 1926 cheads = outgoing.commonheads
1927 1927 else:
1928 1928 # I want cheads = heads(::missingheads and ::commonheads)
1929 1929 # (missingheads is revs with secret changesets filtered out)
1930 1930 #
1931 1931 # This can be expressed as:
1932 1932 # cheads = ( (missingheads and ::commonheads)
1933 1933 # + (commonheads and ::missingheads)
1934 1934 # )
1935 1935 #
1936 1936 # while trying to push we already computed the following:
1937 1937 # common = (::commonheads)
1938 1938 # missing = ((commonheads::missingheads) - commonheads)
1939 1939 #
1940 1940 # We can pick:
1941 1941 # * missingheads part of common (::commonheads)
1942 1942 common = set(outgoing.common)
1943 1943 cheads = [node for node in revs if node in common]
1944 1944 # and
1945 1945 # * commonheads parents on missing
1946 1946 revset = self.set('%ln and parents(roots(%ln))',
1947 1947 outgoing.commonheads,
1948 1948 outgoing.missing)
1949 1949 cheads.extend(c.node() for c in revset)
1950 1950 # even when we don't push, exchanging phase data is useful
1951 1951 remotephases = remote.listkeys('phases')
1952 1952 if not remotephases: # old server or public only repo
1953 1953 phases.advanceboundary(self, phases.public, cheads)
1954 1954 # don't push any phase data as there is nothing to push
1955 1955 else:
1956 1956 ana = phases.analyzeremotephases(self, cheads, remotephases)
1957 1957 pheads, droots = ana
1958 1958 ### Apply remote phase on local
1959 1959 if remotephases.get('publishing', False):
1960 1960 phases.advanceboundary(self, phases.public, cheads)
1961 1961 else: # publish = False
1962 1962 phases.advanceboundary(self, phases.public, pheads)
1963 1963 phases.advanceboundary(self, phases.draft, cheads)
1964 1964 ### Apply local phase on remote
1965 1965
1966 1966 # Get the list of all revs draft on remote but public here.
1967 1967 # XXX Beware that the revset breaks if droots is not strictly
1968 1968 # XXX made of roots; we may want to ensure it is, but that is costly
1969 1969 outdated = self.set('heads((%ln::%ln) and public())',
1970 1970 droots, cheads)
1971 1971 for newremotehead in outdated:
1972 1972 r = remote.pushkey('phases',
1973 1973 newremotehead.hex(),
1974 1974 str(phases.draft),
1975 1975 str(phases.public))
1976 1976 if not r:
1977 1977 self.ui.warn(_('updating %s to public failed!\n')
1978 1978 % newremotehead)
1979 1979 self.ui.debug('try to push obsolete markers to remote\n')
1980 1980 if (obsolete._enabled and self.obsstore and
1981 1981 'obsolete' in remote.listkeys('namespaces')):
1982 1982 rslts = []
1983 1983 remotedata = self.listkeys('obsolete')
1984 1984 for key in sorted(remotedata, reverse=True):
1985 1985 # reverse sort to ensure we end with dump0
1986 1986 data = remotedata[key]
1987 1987 rslts.append(remote.pushkey('obsolete', key, '', data))
1988 1988 if [r for r in rslts if not r]:
1989 1989 msg = _('failed to push some obsolete markers!\n')
1990 1990 self.ui.warn(msg)
1991 1991 finally:
1992 1992 if lock is not None:
1993 1993 lock.release()
1994 1994 finally:
1995 1995 locallock.release()
1996 1996
1997 1997 self.ui.debug("checking for updated bookmarks\n")
1998 1998 rb = remote.listkeys('bookmarks')
1999 1999 for k in rb.keys():
2000 2000 if k in self._bookmarks:
2001 2001 nr, nl = rb[k], hex(self._bookmarks[k])
2002 2002 if nr in self:
2003 2003 cr = self[nr]
2004 2004 cl = self[nl]
2005 2005 if bookmarks.validdest(self, cr, cl):
2006 2006 r = remote.pushkey('bookmarks', k, nr, nl)
2007 2007 if r:
2008 2008 self.ui.status(_("updating bookmark %s\n") % k)
2009 2009 else:
2010 2010 self.ui.warn(_('updating bookmark %s'
2011 2011 ' failed!\n') % k)
2012 2012
2013 2013 return ret
2014 2014
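    # Sketch interpreting push() results per the docstring above,
    # assuming `remote` is a peer obtained elsewhere:
    #
    #   ret = repo.push(remote, newbranch=False)
    #   if ret is None:
    #       pass  # nothing to push
    #   elif ret == 0:
    #       pass  # HTTP error while pushing
    #   else:
    #       pass  # pushed; head-count codes as in addchangegroup()
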
2015 2015 def changegroupinfo(self, nodes, source):
2016 2016 if self.ui.verbose or source == 'bundle':
2017 2017 self.ui.status(_("%d changesets found\n") % len(nodes))
2018 2018 if self.ui.debugflag:
2019 2019 self.ui.debug("list of changesets:\n")
2020 2020 for node in nodes:
2021 2021 self.ui.debug("%s\n" % hex(node))
2022 2022
2023 2023 def changegroupsubset(self, bases, heads, source):
2024 2024 """Compute a changegroup consisting of all the nodes that are
2025 2025 descendants of any of the bases and ancestors of any of the heads.
2026 2026 Return a chunkbuffer object whose read() method will return
2027 2027 successive changegroup chunks.
2028 2028
2029 2029 It is fairly complex as determining which filenodes and which
2030 2030 manifest nodes need to be included for the changeset to be complete
2031 2031 is non-trivial.
2032 2032
2033 2033 Another wrinkle is doing the reverse, figuring out which changeset in
2034 2034 the changegroup a particular filenode or manifestnode belongs to.
2035 2035 """
2036 2036 cl = self.changelog
2037 2037 if not bases:
2038 2038 bases = [nullid]
2039 2039 csets, bases, heads = cl.nodesbetween(bases, heads)
2040 2040 # We assume that all ancestors of bases are known
2041 2041 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2042 2042 return self._changegroupsubset(common, csets, heads, source)
2043 2043
2044 2044 def getlocalbundle(self, source, outgoing):
2045 2045 """Like getbundle, but taking a discovery.outgoing as an argument.
2046 2046
2047 2047 This is only implemented for local repos and reuses potentially
2048 2048 precomputed sets in outgoing."""
2049 2049 if not outgoing.missing:
2050 2050 return None
2051 2051 return self._changegroupsubset(outgoing.common,
2052 2052 outgoing.missing,
2053 2053 outgoing.missingheads,
2054 2054 source)
2055 2055
2056 2056 def getbundle(self, source, heads=None, common=None):
2057 2057 """Like changegroupsubset, but returns the set difference between the
2058 2058 ancestors of heads and the ancestors of common.
2059 2059
2060 2060 If heads is None, use the local heads. If common is None, use [nullid].
2061 2061
2062 2062 The nodes in common might not all be known locally due to the way the
2063 2063 current discovery protocol works.
2064 2064 """
2065 2065 cl = self.changelog
2066 2066 if common:
2067 2067 nm = cl.nodemap
2068 2068 common = [n for n in common if n in nm]
2069 2069 else:
2070 2070 common = [nullid]
2071 2071 if not heads:
2072 2072 heads = cl.heads()
2073 2073 return self.getlocalbundle(source,
2074 2074 discovery.outgoing(cl, common, heads))
2075 2075
2076 2076 def _changegroupsubset(self, commonrevs, csets, heads, source):
2077 2077
2078 2078 cl = self.changelog
2079 2079 mf = self.manifest
2080 2080 mfs = {} # needed manifests
2081 2081 fnodes = {} # needed file nodes
2082 2082 changedfiles = set()
2083 2083 fstate = ['', {}]
2084 2084 count = [0, 0]
2085 2085
2086 2086 # can we go through the fast path?
2087 2087 heads.sort()
2088 2088 if heads == sorted(self.heads()):
2089 2089 return self._changegroup(csets, source)
2090 2090
2091 2091 # slow path
2092 2092 self.hook('preoutgoing', throw=True, source=source)
2093 2093 self.changegroupinfo(csets, source)
2094 2094
2095 2095 # filter any nodes that claim to be part of the known set
2096 2096 def prune(revlog, missing):
2097 2097 rr, rl = revlog.rev, revlog.linkrev
2098 2098 return [n for n in missing
2099 2099 if rl(rr(n)) not in commonrevs]
2100 2100
2101 2101 progress = self.ui.progress
2102 2102 _bundling = _('bundling')
2103 2103 _changesets = _('changesets')
2104 2104 _manifests = _('manifests')
2105 2105 _files = _('files')
2106 2106
2107 2107 def lookup(revlog, x):
2108 2108 if revlog == cl:
2109 2109 c = cl.read(x)
2110 2110 changedfiles.update(c[3])
2111 2111 mfs.setdefault(c[0], x)
2112 2112 count[0] += 1
2113 2113 progress(_bundling, count[0],
2114 2114 unit=_changesets, total=count[1])
2115 2115 return x
2116 2116 elif revlog == mf:
2117 2117 clnode = mfs[x]
2118 2118 mdata = mf.readfast(x)
2119 2119 for f, n in mdata.iteritems():
2120 2120 if f in changedfiles:
2121 2121 fnodes[f].setdefault(n, clnode)
2122 2122 count[0] += 1
2123 2123 progress(_bundling, count[0],
2124 2124 unit=_manifests, total=count[1])
2125 2125 return clnode
2126 2126 else:
2127 2127 progress(_bundling, count[0], item=fstate[0],
2128 2128 unit=_files, total=count[1])
2129 2129 return fstate[1][x]
2130 2130
2131 2131 bundler = changegroup.bundle10(lookup)
2132 2132 reorder = self.ui.config('bundle', 'reorder', 'auto')
2133 2133 if reorder == 'auto':
2134 2134 reorder = None
2135 2135 else:
2136 2136 reorder = util.parsebool(reorder)
2137 2137
2138 2138 def gengroup():
2139 2139 # Create a changenode group generator that will call our functions
2140 2140 # back to lookup the owning changenode and collect information.
2141 2141 count[:] = [0, len(csets)]
2142 2142 for chunk in cl.group(csets, bundler, reorder=reorder):
2143 2143 yield chunk
2144 2144 progress(_bundling, None)
2145 2145
2146 2146 # Create a generator for the manifestnodes that calls our lookup
2147 2147 # and data collection functions back.
2148 2148 for f in changedfiles:
2149 2149 fnodes[f] = {}
2150 2150 count[:] = [0, len(mfs)]
2151 2151 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2152 2152 yield chunk
2153 2153 progress(_bundling, None)
2154 2154
2155 2155 mfs.clear()
2156 2156
2157 2157 # Go through all our files in order sorted by name.
2158 2158 count[:] = [0, len(changedfiles)]
2159 2159 for fname in sorted(changedfiles):
2160 2160 filerevlog = self.file(fname)
2161 2161 if not len(filerevlog):
2162 2162 raise util.Abort(_("empty or missing revlog for %s")
2163 2163 % fname)
2164 2164 fstate[0] = fname
2165 2165 fstate[1] = fnodes.pop(fname, {})
2166 2166
2167 2167 nodelist = prune(filerevlog, fstate[1])
2168 2168 if nodelist:
2169 2169 count[0] += 1
2170 2170 yield bundler.fileheader(fname)
2171 2171 for chunk in filerevlog.group(nodelist, bundler, reorder):
2172 2172 yield chunk
2173 2173
2174 2174 # Signal that no more groups are left.
2175 2175 yield bundler.close()
2176 2176 progress(_bundling, None)
2177 2177
2178 2178 if csets:
2179 2179 self.hook('outgoing', node=hex(csets[0]), source=source)
2180 2180
2181 2181 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2182 2182
2183 2183 def changegroup(self, basenodes, source):
2184 2184 # to avoid a race we use changegroupsubset() (issue1320)
2185 2185 return self.changegroupsubset(basenodes, self.heads(), source)
2186 2186
2187 2187 def _changegroup(self, nodes, source):
2188 2188 """Compute the changegroup of all nodes that we have that a recipient
2189 2189 doesn't. Return a chunkbuffer object whose read() method will return
2190 2190 successive changegroup chunks.
2191 2191
2192 2192 This is much easier than the previous function as we can assume that
2193 2193 the recipient has any changenode we aren't sending them.
2194 2194
2195 2195 nodes is the set of nodes to send"""
2196 2196
2197 2197 cl = self.changelog
2198 2198 mf = self.manifest
2199 2199 mfs = {}
2200 2200 changedfiles = set()
2201 2201 fstate = ['']
2202 2202 count = [0, 0]
2203 2203
2204 2204 self.hook('preoutgoing', throw=True, source=source)
2205 2205 self.changegroupinfo(nodes, source)
2206 2206
2207 2207 revset = set([cl.rev(n) for n in nodes])
2208 2208
2209 2209 def gennodelst(log):
2210 2210 ln, llr = log.node, log.linkrev
2211 2211 return [ln(r) for r in log if llr(r) in revset]
2212 2212
2213 2213 progress = self.ui.progress
2214 2214 _bundling = _('bundling')
2215 2215 _changesets = _('changesets')
2216 2216 _manifests = _('manifests')
2217 2217 _files = _('files')
2218 2218
2219 2219 def lookup(revlog, x):
2220 2220 if revlog == cl:
2221 2221 c = cl.read(x)
2222 2222 changedfiles.update(c[3])
2223 2223 mfs.setdefault(c[0], x)
2224 2224 count[0] += 1
2225 2225 progress(_bundling, count[0],
2226 2226 unit=_changesets, total=count[1])
2227 2227 return x
2228 2228 elif revlog == mf:
2229 2229 count[0] += 1
2230 2230 progress(_bundling, count[0],
2231 2231 unit=_manifests, total=count[1])
2232 2232 return cl.node(revlog.linkrev(revlog.rev(x)))
2233 2233 else:
2234 2234 progress(_bundling, count[0], item=fstate[0],
2235 2235 total=count[1], unit=_files)
2236 2236 return cl.node(revlog.linkrev(revlog.rev(x)))
2237 2237
2238 2238 bundler = changegroup.bundle10(lookup)
2239 2239 reorder = self.ui.config('bundle', 'reorder', 'auto')
2240 2240 if reorder == 'auto':
2241 2241 reorder = None
2242 2242 else:
2243 2243 reorder = util.parsebool(reorder)
2244 2244
2245 2245 def gengroup():
2246 2246 '''yield a sequence of changegroup chunks (strings)'''
2247 2247 # construct a list of all changed files
2248 2248
2249 2249 count[:] = [0, len(nodes)]
2250 2250 for chunk in cl.group(nodes, bundler, reorder=reorder):
2251 2251 yield chunk
2252 2252 progress(_bundling, None)
2253 2253
2254 2254 count[:] = [0, len(mfs)]
2255 2255 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2256 2256 yield chunk
2257 2257 progress(_bundling, None)
2258 2258
2259 2259 count[:] = [0, len(changedfiles)]
2260 2260 for fname in sorted(changedfiles):
2261 2261 filerevlog = self.file(fname)
2262 2262 if not len(filerevlog):
2263 2263 raise util.Abort(_("empty or missing revlog for %s")
2264 2264 % fname)
2265 2265 fstate[0] = fname
2266 2266 nodelist = gennodelst(filerevlog)
2267 2267 if nodelist:
2268 2268 count[0] += 1
2269 2269 yield bundler.fileheader(fname)
2270 2270 for chunk in filerevlog.group(nodelist, bundler, reorder):
2271 2271 yield chunk
2272 2272 yield bundler.close()
2273 2273 progress(_bundling, None)
2274 2274
2275 2275 if nodes:
2276 2276 self.hook('outgoing', node=hex(nodes[0]), source=source)
2277 2277
2278 2278 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2279 2279
2280 2280 def addchangegroup(self, source, srctype, url, emptyok=False):
2281 2281 """Add the changegroup returned by source.read() to this repo.
2282 2282 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2283 2283 the URL of the repo where this changegroup is coming from.
2284 2284
2285 2285 Return an integer summarizing the change to this repo:
2286 2286 - nothing changed or no source: 0
2287 2287 - more heads than before: 1+added heads (2..n)
2288 2288 - fewer heads than before: -1-removed heads (-2..-n)
2289 2289 - number of heads stays the same: 1
2290 2290 """
2291 2291 def csmap(x):
2292 2292 self.ui.debug("add changeset %s\n" % short(x))
2293 2293 return len(cl)
2294 2294
2295 2295 def revmap(x):
2296 2296 return cl.rev(x)
2297 2297
2298 2298 if not source:
2299 2299 return 0
2300 2300
2301 2301 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2302 2302
2303 2303 changesets = files = revisions = 0
2304 2304 efiles = set()
2305 2305
2306 2306 # write changelog data to temp files so concurrent readers will not see
2307 2307 # an inconsistent view
2308 2308 cl = self.changelog
2309 2309 cl.delayupdate()
2310 2310 oldheads = cl.heads()
2311 2311
2312 2312 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2313 2313 try:
2314 2314 trp = weakref.proxy(tr)
2315 2315 # pull off the changeset group
2316 2316 self.ui.status(_("adding changesets\n"))
2317 2317 clstart = len(cl)
2318 2318 class prog(object):
2319 2319 step = _('changesets')
2320 2320 count = 1
2321 2321 ui = self.ui
2322 2322 total = None
2323 2323 def __call__(self):
2324 2324 self.ui.progress(self.step, self.count, unit=_('chunks'),
2325 2325 total=self.total)
2326 2326 self.count += 1
2327 2327 pr = prog()
2328 2328 source.callback = pr
2329 2329
2330 2330 source.changelogheader()
2331 2331 srccontent = cl.addgroup(source, csmap, trp)
2332 2332 if not (srccontent or emptyok):
2333 2333 raise util.Abort(_("received changelog group is empty"))
2334 2334 clend = len(cl)
2335 2335 changesets = clend - clstart
2336 2336 for c in xrange(clstart, clend):
2337 2337 efiles.update(self[c].files())
2338 2338 efiles = len(efiles)
2339 2339 self.ui.progress(_('changesets'), None)
2340 2340
2341 2341 # pull off the manifest group
2342 2342 self.ui.status(_("adding manifests\n"))
2343 2343 pr.step = _('manifests')
2344 2344 pr.count = 1
2345 2345 pr.total = changesets # manifests <= changesets
2346 2346 # no need to check for empty manifest group here:
2347 2347 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2348 2348 # no new manifest will be created and the manifest group will
2349 2349 # be empty during the pull
2350 2350 source.manifestheader()
2351 2351 self.manifest.addgroup(source, revmap, trp)
2352 2352 self.ui.progress(_('manifests'), None)
2353 2353
2354 2354 needfiles = {}
2355 2355 if self.ui.configbool('server', 'validate', default=False):
2356 2356 # validate incoming csets have their manifests
2357 2357 for cset in xrange(clstart, clend):
2358 2358 mfest = self.changelog.read(self.changelog.node(cset))[0]
2359 2359 mfest = self.manifest.readdelta(mfest)
2360 2360 # store file nodes we must see
2361 2361 for f, n in mfest.iteritems():
2362 2362 needfiles.setdefault(f, set()).add(n)
2363 2363
2364 2364 # process the files
2365 2365 self.ui.status(_("adding file changes\n"))
2366 2366 pr.step = _('files')
2367 2367 pr.count = 1
2368 2368 pr.total = efiles
2369 2369 source.callback = None
2370 2370
2371 2371 while True:
2372 2372 chunkdata = source.filelogheader()
2373 2373 if not chunkdata:
2374 2374 break
2375 2375 f = chunkdata["filename"]
2376 2376 self.ui.debug("adding %s revisions\n" % f)
2377 2377 pr()
2378 2378 fl = self.file(f)
2379 2379 o = len(fl)
2380 2380 if not fl.addgroup(source, revmap, trp):
2381 2381 raise util.Abort(_("received file revlog group is empty"))
2382 2382 revisions += len(fl) - o
2383 2383 files += 1
2384 2384 if f in needfiles:
2385 2385 needs = needfiles[f]
2386 2386 for new in xrange(o, len(fl)):
2387 2387 n = fl.node(new)
2388 2388 if n in needs:
2389 2389 needs.remove(n)
2390 2390 if not needs:
2391 2391 del needfiles[f]
2392 2392 self.ui.progress(_('files'), None)
2393 2393
2394 2394 for f, needs in needfiles.iteritems():
2395 2395 fl = self.file(f)
2396 2396 for n in needs:
2397 2397 try:
2398 2398 fl.rev(n)
2399 2399 except error.LookupError:
2400 2400 raise util.Abort(
2401 2401 _('missing file data for %s:%s - run hg verify') %
2402 2402 (f, hex(n)))
2403 2403
2404 2404 dh = 0
2405 2405 if oldheads:
2406 2406 heads = cl.heads()
2407 2407 dh = len(heads) - len(oldheads)
2408 2408 for h in heads:
2409 2409 if h not in oldheads and self[h].closesbranch():
2410 2410 dh -= 1
2411 2411 htext = ""
2412 2412 if dh:
2413 2413 htext = _(" (%+d heads)") % dh
2414 2414
2415 2415 self.ui.status(_("added %d changesets"
2416 2416 " with %d changes to %d files%s\n")
2417 2417 % (changesets, revisions, files, htext))
2418 2418 obsolete.clearobscaches(self)
2419 2419
2420 2420 if changesets > 0:
2421 2421 p = lambda: cl.writepending() and self.root or ""
2422 2422 self.hook('pretxnchangegroup', throw=True,
2423 2423 node=hex(cl.node(clstart)), source=srctype,
2424 2424 url=url, pending=p)
2425 2425
2426 2426 added = [cl.node(r) for r in xrange(clstart, clend)]
2427 2427 publishing = self.ui.configbool('phases', 'publish', True)
2428 2428 if srctype == 'push':
2429 2429 # Old servers can not push the boundary themselves.
2430 2430 # New servers won't push the boundary if the changeset already
2431 2431 # existed locally as secret
2432 2432 #
2433 2433 # We should not use added here but the list of all changes
2434 2434 # in the bundle
2435 2435 if publishing:
2436 2436 phases.advanceboundary(self, phases.public, srccontent)
2437 2437 else:
2438 2438 phases.advanceboundary(self, phases.draft, srccontent)
2439 2439 phases.retractboundary(self, phases.draft, added)
2440 2440 elif srctype != 'strip':
2441 2441 # publishing only alters behavior during push
2442 2442 #
2443 2443 # strip should not touch boundary at all
2444 2444 phases.retractboundary(self, phases.draft, added)
2445 2445
2446 2446 # make changelog see real files again
2447 2447 cl.finalize(trp)
2448 2448
2449 2449 tr.close()
2450 2450
2451 2451 if changesets > 0:
2452 2452 self.updatebranchcache()
2453 2453 def runhooks():
2454 2454 # forcefully update the on-disk branch cache
2455 2455 self.ui.debug("updating the branch cache\n")
2456 2456 self.hook("changegroup", node=hex(cl.node(clstart)),
2457 2457 source=srctype, url=url)
2458 2458
2459 2459 for n in added:
2460 2460 self.hook("incoming", node=hex(n), source=srctype,
2461 2461 url=url)
2462 2462 self._afterlock(runhooks)
2463 2463
2464 2464 finally:
2465 2465 tr.release()
2466 2466 # never return 0 here:
2467 2467 if dh < 0:
2468 2468 return dh - 1
2469 2469 else:
2470 2470 return dh + 1
2471 2471
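    # Sketch decoding the addchangegroup() return value described in its
    # docstring: the head-count delta dh is shifted away from 0, so:
    #
    #   ret = repo.addchangegroup(cg, 'pull', url)  # cg from some peer
    #   if ret > 1:
    #       pass  # ret - 1 new heads
    #   elif ret == 1:
    #       pass  # head count unchanged
    #   elif ret < 0:
    #       pass  # -(ret + 1) fewer heads
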
2472 2472 def stream_in(self, remote, requirements):
2473 2473 lock = self.lock()
2474 2474 try:
2475 # Save remote branchmap. We will use it later
2476 # to speed up branchcache creation
2477 rbranchmap = None
2478 if remote.capable("branchmap"):
2479 rbranchmap = remote.branchmap()
2480
2475 2481 fp = remote.stream_out()
2476 2482 l = fp.readline()
2477 2483 try:
2478 2484 resp = int(l)
2479 2485 except ValueError:
2480 2486 raise error.ResponseError(
2481 2487 _('unexpected response from remote server:'), l)
2482 2488 if resp == 1:
2483 2489 raise util.Abort(_('operation forbidden by server'))
2484 2490 elif resp == 2:
2485 2491 raise util.Abort(_('locking the remote repository failed'))
2486 2492 elif resp != 0:
2487 2493 raise util.Abort(_('the server sent an unknown error code'))
2488 2494 self.ui.status(_('streaming all changes\n'))
2489 2495 l = fp.readline()
2490 2496 try:
2491 2497 total_files, total_bytes = map(int, l.split(' ', 1))
2492 2498 except (ValueError, TypeError):
2493 2499 raise error.ResponseError(
2494 2500 _('unexpected response from remote server:'), l)
2495 2501 self.ui.status(_('%d files to transfer, %s of data\n') %
2496 2502 (total_files, util.bytecount(total_bytes)))
2497 2503 handled_bytes = 0
2498 2504 self.ui.progress(_('clone'), 0, total=total_bytes)
2499 2505 start = time.time()
2500 2506 for i in xrange(total_files):
2501 2507 # XXX doesn't support '\n' or '\r' in filenames
2502 2508 l = fp.readline()
2503 2509 try:
2504 2510 name, size = l.split('\0', 1)
2505 2511 size = int(size)
2506 2512 except (ValueError, TypeError):
2507 2513 raise error.ResponseError(
2508 2514 _('unexpected response from remote server:'), l)
2509 2515 if self.ui.debugflag:
2510 2516 self.ui.debug('adding %s (%s)\n' %
2511 2517 (name, util.bytecount(size)))
2512 2518 # for backwards compat, name was partially encoded
2513 2519 ofp = self.sopener(store.decodedir(name), 'w')
2514 2520 for chunk in util.filechunkiter(fp, limit=size):
2515 2521 handled_bytes += len(chunk)
2516 2522 self.ui.progress(_('clone'), handled_bytes,
2517 2523 total=total_bytes)
2518 2524 ofp.write(chunk)
2519 2525 ofp.close()
2520 2526 elapsed = time.time() - start
2521 2527 if elapsed <= 0:
2522 2528 elapsed = 0.001
2523 2529 self.ui.progress(_('clone'), None)
2524 2530 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2525 2531 (util.bytecount(total_bytes), elapsed,
2526 2532 util.bytecount(total_bytes / elapsed)))
2527 2533
2528 2534 # new requirements = old non-format requirements +
2529 2535 # new format-related requirements from the
2530 2536 # streamed-in repository
2531 2537 requirements.update(set(self.requirements) - self.supportedformats)
2532 2538 self._applyrequirements(requirements)
2533 2539 self._writerequirements()
2534 2540
2541 if rbranchmap:
2542 rbheads = []
2543 for bheads in rbranchmap.itervalues():
2544 rbheads.extend(bheads)
2545
2546 self.branchcache = rbranchmap
2547 if rbheads:
2548 rtiprev = max((int(self.changelog.rev(node))
2549 for node in rbheads))
2550 self._writebranchcache(self.branchcache,
2551 self[rtiprev].node(), rtiprev)
2535 2552 self.invalidate()
2536 2553 return len(self.heads()) + 1
2537 2554 finally:
2538 2555 lock.release()
2539 2556
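Two things are worth spelling out about stream_in. First, the lines added by this change (2475-2480 and 2541-2551) fetch the remote branchmap before the stream starts and, once the requirements are rewritten, install it as the local branch cache, so a fresh streaming clone does not have to recompute branch heads from scratch (issue3378). Second, the wire format the method consumes is simple; the sketch below is inferred only from the reads above (a status line, a "files bytes" summary line, then a "name\0size" header per file followed by exactly size raw bytes), and parse_stream is a hypothetical name:

    def parse_stream(fp):
        # status line: 0 = ok, 1 = operation forbidden, 2 = remote lock failed
        resp = int(fp.readline())
        if resp != 0:
            raise ValueError('stream refused with code %d' % resp)
        # summary line: "<total_files> <total_bytes>"
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for _ in xrange(total_files):
            # per-file header: "<name>\0<size>", then <size> raw bytes
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))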
2540 2557 def clone(self, remote, heads=[], stream=False):
2541 2558 '''clone remote repository.
2542 2559
2543 2560 keyword arguments:
2544 2561 heads: list of revs to clone (forces use of pull)
2545 2562 stream: use streaming clone if possible'''
2546 2563
2547 2564 # now, all clients that can request uncompressed clones can
2548 2565 # read repo formats supported by all servers that can serve
2549 2566 # them.
2550 2567
2551 2568 # if revlog format changes, client will have to check version
2552 2569 # and format flags on "stream" capability, and use
2553 2570 # uncompressed only if compatible.
2554 2571
2555 2572 if not stream:
2556 2573 # if the server explicitly prefers to stream (for fast LANs)
2557 2574 stream = remote.capable('stream-preferred')
2558 2575
2559 2576 if stream and not heads:
2560 2577 # 'stream' means remote revlog format is revlogv1 only
2561 2578 if remote.capable('stream'):
2562 2579 return self.stream_in(remote, set(('revlogv1',)))
2563 2580 # otherwise, 'streamreqs' contains the remote revlog format
2564 2581 streamreqs = remote.capable('streamreqs')
2565 2582 if streamreqs:
2566 2583 streamreqs = set(streamreqs.split(','))
2567 2584 # if we support it, stream in and adjust our requirements
2568 2585 if not streamreqs - self.supportedformats:
2569 2586 return self.stream_in(remote, streamreqs)
2570 2587 return self.pull(remote, heads)
2571 2588
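The negotiation in clone() reduces to a small decision table over server capabilities. A hedged sketch (choose_clone_strategy is a hypothetical helper; remote.capable and the capability names are the ones used above):

    def choose_clone_strategy(remote, supportedformats, heads, stream):
        if not stream:
            # the server may explicitly prefer streaming on fast LANs
            stream = remote.capable('stream-preferred')
        if not stream or heads:
            return 'pull'      # explicit revs always force a pull
        if remote.capable('stream'):
            return 'stream'    # plain revlogv1 stream
        streamreqs = remote.capable('streamreqs')
        if streamreqs and not set(streamreqs.split(',')) - supportedformats:
            return 'stream'    # every remote format is supported locally
        return 'pull'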
2572 2589 def pushkey(self, namespace, key, old, new):
2573 2590 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2574 2591 old=old, new=new)
2575 2592 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2576 2593 ret = pushkey.push(self, namespace, key, old, new)
2577 2594 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2578 2595 ret=ret)
2579 2596 return ret
2580 2597
2581 2598 def listkeys(self, namespace):
2582 2599 self.hook('prelistkeys', throw=True, namespace=namespace)
2583 2600 self.ui.debug('listing keys for "%s"\n' % namespace)
2584 2601 values = pushkey.list(self, namespace)
2585 2602 self.hook('listkeys', namespace=namespace, values=values)
2586 2603 return values
2587 2604
2588 2605 def debugwireargs(self, one, two, three=None, four=None, five=None):
2589 2606 '''used to test argument passing over the wire'''
2590 2607 return "%s %s %s %s %s" % (one, two, three, four, five)
2591 2608
2592 2609 def savecommitmessage(self, text):
2593 2610 fp = self.opener('last-message.txt', 'wb')
2594 2611 try:
2595 2612 fp.write(text)
2596 2613 finally:
2597 2614 fp.close()
2598 2615 return self.pathto(fp.name[len(self.root)+1:])
2599 2616
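A usage note on savecommitmessage: it persists the text under the repository's .hg directory and returns the path relative to the repository root, so a failed commit can point the user at the saved message. Illustrative call (the message itself is made up):

    relpath = repo.savecommitmessage('WIP: fetch branchcache during clone')
    # relpath is the repo-relative path, e.g. '.hg/last-message.txt'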
2600 2617 # used to avoid circular references so destructors work
2601 2618 def aftertrans(files):
2602 2619 renamefiles = [tuple(t) for t in files]
2603 2620 def a():
2604 2621 for src, dest in renamefiles:
2605 2622 try:
2606 2623 util.rename(src, dest)
2607 2624 except OSError: # journal file does not yet exist
2608 2625 pass
2609 2626 return a
2610 2627
2611 2628 def undoname(fn):
2612 2629 base, name = os.path.split(fn)
2613 2630 assert name.startswith('journal')
2614 2631 return os.path.join(base, name.replace('journal', 'undo', 1))
2615 2632
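And a quick example of undoname, which maps a transaction journal file to its undo counterpart; only the first 'journal' in the basename is replaced (paths illustrative):

    >>> undoname('/repo/.hg/store/journal')
    '/repo/.hg/store/undo'
    >>> undoname('/repo/.hg/journal.dirstate')
    '/repo/.hg/undo.dirstate'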
2616 2633 def instance(ui, path, create):
2617 2634 return localrepository(ui, util.urllocalpath(path), create)
2618 2635
2619 2636 def islocal(path):
2620 2637 return True
@@ -1,124 +1,125 b''
1 1 $ "$TESTDIR/hghave" serve || exit 80
2 2
3 3 $ hg init a
4 4 $ cd a
5 5 $ echo a > a
6 6 $ hg ci -Ama -d '1123456789 0'
7 7 adding a
8 8 $ hg --config server.uncompressed=True serve -p $HGPORT -d --pid-file=hg.pid
9 9 $ cat hg.pid >> $DAEMON_PIDS
10 10 $ cd ..
11 11 $ "$TESTDIR/tinyproxy.py" $HGPORT1 localhost >proxy.log 2>&1 </dev/null &
12 12 $ while [ ! -f proxy.pid ]; do sleep 0; done
13 13 $ cat proxy.pid >> $DAEMON_PIDS
14 14
15 15 url for proxy, stream
16 16
17 17 $ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone --uncompressed http://localhost:$HGPORT/ b
18 18 streaming all changes
19 19 3 files to transfer, 303 bytes of data
20 20 transferred * bytes in * seconds (*/sec) (glob)
21 21 updating to branch default
22 22 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 23 $ cd b
24 24 $ hg verify
25 25 checking changesets
26 26 checking manifests
27 27 crosschecking files in changesets and manifests
28 28 checking files
29 29 1 files, 1 changesets, 1 total revisions
30 30 $ cd ..
31 31
32 32 url for proxy, pull
33 33
34 34 $ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone http://localhost:$HGPORT/ b-pull
35 35 requesting all changes
36 36 adding changesets
37 37 adding manifests
38 38 adding file changes
39 39 added 1 changesets with 1 changes to 1 files
40 40 updating to branch default
41 41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 42 $ cd b-pull
43 43 $ hg verify
44 44 checking changesets
45 45 checking manifests
46 46 crosschecking files in changesets and manifests
47 47 checking files
48 48 1 files, 1 changesets, 1 total revisions
49 49 $ cd ..
50 50
51 51 host:port for proxy
52 52
53 53 $ http_proxy=localhost:$HGPORT1 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ c
54 54 requesting all changes
55 55 adding changesets
56 56 adding manifests
57 57 adding file changes
58 58 added 1 changesets with 1 changes to 1 files
59 59 updating to branch default
60 60 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
61 61
62 62 proxy url with user name and password
63 63
64 64 $ http_proxy=http://user:passwd@localhost:$HGPORT1 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ d
65 65 requesting all changes
66 66 adding changesets
67 67 adding manifests
68 68 adding file changes
69 69 added 1 changesets with 1 changes to 1 files
70 70 updating to branch default
71 71 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
72 72
73 73 url with user name and password
74 74
75 75 $ http_proxy=http://user:passwd@localhost:$HGPORT1 hg clone --config http_proxy.always=True http://user:passwd@localhost:$HGPORT/ e
76 76 requesting all changes
77 77 adding changesets
78 78 adding manifests
79 79 adding file changes
80 80 added 1 changesets with 1 changes to 1 files
81 81 updating to branch default
82 82 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
83 83
84 84 bad host:port for proxy
85 85
86 86 $ http_proxy=localhost:$HGPORT2 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ f
87 87 abort: error: Connection refused
88 88 [255]
89 89
90 90 do not use the proxy if it is in the no list
91 91
92 92 $ http_proxy=localhost:$HGPORT1 hg clone --config http_proxy.no=localhost http://localhost:$HGPORT/ g
93 93 requesting all changes
94 94 adding changesets
95 95 adding manifests
96 96 adding file changes
97 97 added 1 changesets with 1 changes to 1 files
98 98 updating to branch default
99 99 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 100 $ cat proxy.log
101 101 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
102 * - - [*] "GET http://localhost:$HGPORT/?cmd=branchmap HTTP/1.1" - - (glob)
102 103 * - - [*] "GET http://localhost:$HGPORT/?cmd=stream_out HTTP/1.1" - - (glob)
103 104 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
104 105 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
105 106 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
106 107 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
107 108 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
108 109 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
109 110 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
110 111 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
111 112 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
112 113 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
113 114 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
114 115 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
115 116 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
116 117 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
117 118 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
118 119 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
119 120 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
120 121 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
121 122 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
122 123 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
123 124 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
124 125