peer: remove cancopy from peer api; use directly on repo instead
Sune Foldager
r17194:32a6a33b default
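In short: this changeset removes `cancopy()` from the peer API (the `localpeer` class in the localrepo.py hunk below) and has callers query the underlying repository directly. A condensed before/after of the affected call site in `hg.clone`, paraphrased from the hg.py hunk below (per that hunk, `srcpeer.local()` yields the repository object for a local peer and None for a remote one):

    # Before (old line 305): the peer forwarded cancopy() to its repo.
    if srcpeer.cancopy() and islocal(dest) and not srcrepo.revs("secret()"):
        copy = not pull and not rev

    # After (new lines 305-306): ask the repository directly; the extra
    # "srcrepo and" guard covers remote peers, where srcpeer.local()
    # returned None.
    if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not srcrepo.revs("secret()")):
        copy = not pull and not rev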
@@ -1,597 +1,598
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from i18n import _
10 10 from lock import release
11 11 from node import hex, nullid
12 12 import localrepo, bundlerepo, httppeer, sshpeer, statichttprepo, bookmarks
13 13 import lock, util, extensions, error, node, scmutil
14 14 import cmdutil, discovery
15 15 import merge as mergemod
16 16 import verify as verifymod
17 17 import errno, os, shutil
18 18
19 19 def _local(path):
20 20 path = util.expandpath(util.urllocalpath(path))
21 21 return (os.path.isfile(path) and bundlerepo or localrepo)
22 22
23 23 def addbranchrevs(lrepo, other, branches, revs):
24 24 peer = other.peer() # a courtesy to callers using a localrepo for other
25 25 hashbranch, branches = branches
26 26 if not hashbranch and not branches:
27 27 return revs or None, revs and revs[0] or None
28 28 revs = revs and list(revs) or []
29 29 if not peer.capable('branchmap'):
30 30 if branches:
31 31 raise util.Abort(_("remote branch lookup not supported"))
32 32 revs.append(hashbranch)
33 33 return revs, revs[0]
34 34 branchmap = peer.branchmap()
35 35
36 36 def primary(branch):
37 37 if branch == '.':
38 38 if not lrepo:
39 39 raise util.Abort(_("dirstate branch not accessible"))
40 40 branch = lrepo.dirstate.branch()
41 41 if branch in branchmap:
42 42 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
43 43 return True
44 44 else:
45 45 return False
46 46
47 47 for branch in branches:
48 48 if not primary(branch):
49 49 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
50 50 if hashbranch:
51 51 if not primary(hashbranch):
52 52 revs.append(hashbranch)
53 53 return revs, revs[0]
54 54
55 55 def parseurl(path, branches=None):
56 56 '''parse url#branch, returning (url, (branch, branches))'''
57 57
58 58 u = util.url(path)
59 59 branch = None
60 60 if u.fragment:
61 61 branch = u.fragment
62 62 u.fragment = None
63 63 return str(u), (branch, branches or [])
64 64
65 65 schemes = {
66 66 'bundle': bundlerepo,
67 67 'file': _local,
68 68 'http': httppeer,
69 69 'https': httppeer,
70 70 'ssh': sshpeer,
71 71 'static-http': statichttprepo,
72 72 }
73 73
74 74 def _peerlookup(path):
75 75 u = util.url(path)
76 76 scheme = u.scheme or 'file'
77 77 thing = schemes.get(scheme) or schemes['file']
78 78 try:
79 79 return thing(path)
80 80 except TypeError:
81 81 return thing
82 82
83 83 def islocal(repo):
84 84 '''return true if repo or path is local'''
85 85 if isinstance(repo, str):
86 86 try:
87 87 return _peerlookup(repo).islocal(repo)
88 88 except AttributeError:
89 89 return False
90 90 return repo.local()
91 91
92 92 def _peerorrepo(ui, path, create=False):
93 93 """return a repository object for the specified path"""
94 94 obj = _peerlookup(path).instance(ui, path, create)
95 95 ui = getattr(obj, "ui", ui)
96 96 for name, module in extensions.extensions():
97 97 hook = getattr(module, 'reposetup', None)
98 98 if hook:
99 99 hook(ui, obj)
100 100 return obj
101 101
102 102 def repository(ui, path='', create=False):
103 103 """return a repository object for the specified path"""
104 104 peer = _peerorrepo(ui, path, create)
105 105 repo = peer.local()
106 106 if not repo:
107 107 raise util.Abort(_("repository '%s' is not local") %
108 108 (path or peer.url()))
109 109 return repo
110 110
111 111 def peer(uiorrepo, opts, path, create=False):
112 112 '''return a repository peer for the specified path'''
113 113 rui = remoteui(uiorrepo, opts)
114 114 return _peerorrepo(rui, path, create).peer()
115 115
116 116 def defaultdest(source):
117 117 '''return default destination of clone if none is given'''
118 118 return os.path.basename(os.path.normpath(source))
119 119
120 120 def share(ui, source, dest=None, update=True):
121 121 '''create a shared repository'''
122 122
123 123 if not islocal(source):
124 124 raise util.Abort(_('can only share local repositories'))
125 125
126 126 if not dest:
127 127 dest = defaultdest(source)
128 128 else:
129 129 dest = ui.expandpath(dest)
130 130
131 131 if isinstance(source, str):
132 132 origsource = ui.expandpath(source)
133 133 source, branches = parseurl(origsource)
134 134 srcrepo = repository(ui, source)
135 135 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
136 136 else:
137 137 srcrepo = source.local()
138 138 origsource = source = srcrepo.url()
139 139 checkout = None
140 140
141 141 sharedpath = srcrepo.sharedpath # if our source is already sharing
142 142
143 143 root = os.path.realpath(dest)
144 144 roothg = os.path.join(root, '.hg')
145 145
146 146 if os.path.exists(roothg):
147 147 raise util.Abort(_('destination already exists'))
148 148
149 149 if not os.path.isdir(root):
150 150 os.mkdir(root)
151 151 util.makedir(roothg, notindexed=True)
152 152
153 153 requirements = ''
154 154 try:
155 155 requirements = srcrepo.opener.read('requires')
156 156 except IOError, inst:
157 157 if inst.errno != errno.ENOENT:
158 158 raise
159 159
160 160 requirements += 'shared\n'
161 161 util.writefile(os.path.join(roothg, 'requires'), requirements)
162 162 util.writefile(os.path.join(roothg, 'sharedpath'), sharedpath)
163 163
164 164 r = repository(ui, root)
165 165
166 166 default = srcrepo.ui.config('paths', 'default')
167 167 if default:
168 168 fp = r.opener("hgrc", "w", text=True)
169 169 fp.write("[paths]\n")
170 170 fp.write("default = %s\n" % default)
171 171 fp.close()
172 172
173 173 if update:
174 174 r.ui.status(_("updating working directory\n"))
175 175 if update is not True:
176 176 checkout = update
177 177 for test in (checkout, 'default', 'tip'):
178 178 if test is None:
179 179 continue
180 180 try:
181 181 uprev = r.lookup(test)
182 182 break
183 183 except error.RepoLookupError:
184 184 continue
185 185 _update(r, uprev)
186 186
187 187 def copystore(ui, srcrepo, destpath):
188 188 '''copy files from store of srcrepo to destpath
189 189
190 190 returns destlock
191 191 '''
192 192 destlock = None
193 193 try:
194 194 hardlink = None
195 195 num = 0
196 196 srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
197 197 for f in srcrepo.store.copylist():
198 198 if srcpublishing and f.endswith('phaseroots'):
199 199 continue
200 200 src = os.path.join(srcrepo.sharedpath, f)
201 201 dst = os.path.join(destpath, f)
202 202 dstbase = os.path.dirname(dst)
203 203 if dstbase and not os.path.exists(dstbase):
204 204 os.mkdir(dstbase)
205 205 if os.path.exists(src):
206 206 if dst.endswith('data'):
207 207 # lock to avoid premature writing to the target
208 208 destlock = lock.lock(os.path.join(dstbase, "lock"))
209 209 hardlink, n = util.copyfiles(src, dst, hardlink)
210 210 num += n
211 211 if hardlink:
212 212 ui.debug("linked %d files\n" % num)
213 213 else:
214 214 ui.debug("copied %d files\n" % num)
215 215 return destlock
216 216 except: # re-raises
217 217 release(destlock)
218 218 raise
219 219
220 220 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
221 221 update=True, stream=False, branch=None):
222 222 """Make a copy of an existing repository.
223 223
224 224 Create a copy of an existing repository in a new directory. The
225 225 source and destination are URLs, as passed to the repository
226 226 function. Returns a pair of repository peers, the source and
227 227 newly created destination.
228 228
229 229 The location of the source is added to the new repository's
230 230 .hg/hgrc file, as the default to be used for future pulls and
231 231 pushes.
232 232
233 233 If an exception is raised, the partly cloned/updated destination
234 234 repository will be deleted.
235 235
236 236 Arguments:
237 237
238 238 source: repository object or URL
239 239
240 240 dest: URL of destination repository to create (defaults to base
241 241 name of source repository)
242 242
243 243 pull: always pull from source repository, even in local case
244 244
245 245 stream: stream raw data uncompressed from repository (fast over
246 246 LAN, slow over WAN)
247 247
248 248 rev: revision to clone up to (implies pull=True)
249 249
250 250 update: update working directory after clone completes, if
251 251 destination is local repository (True means update to default rev,
252 252 anything else is treated as a revision)
253 253
254 254 branch: branches to clone
255 255 """
256 256
257 257 if isinstance(source, str):
258 258 origsource = ui.expandpath(source)
259 259 source, branch = parseurl(origsource, branch)
260 260 srcpeer = peer(ui, peeropts, source)
261 261 else:
262 262 srcpeer = source.peer() # in case we were called with a localrepo
263 263 branch = (None, branch or [])
264 264 origsource = source = srcpeer.url()
265 265 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
266 266
267 267 if dest is None:
268 268 dest = defaultdest(source)
269 269 ui.status(_("destination directory: %s\n") % dest)
270 270 else:
271 271 dest = ui.expandpath(dest)
272 272
273 273 dest = util.urllocalpath(dest)
274 274 source = util.urllocalpath(source)
275 275
276 276 if not dest:
277 277 raise util.Abort(_("empty destination path is not valid"))
278 278 if os.path.exists(dest):
279 279 if not os.path.isdir(dest):
280 280 raise util.Abort(_("destination '%s' already exists") % dest)
281 281 elif os.listdir(dest):
282 282 raise util.Abort(_("destination '%s' is not empty") % dest)
283 283
284 284 class DirCleanup(object):
285 285 def __init__(self, dir_):
286 286 self.rmtree = shutil.rmtree
287 287 self.dir_ = dir_
288 288 def close(self):
289 289 self.dir_ = None
290 290 def cleanup(self):
291 291 if self.dir_:
292 292 self.rmtree(self.dir_, True)
293 293
294 294 srclock = destlock = dircleanup = None
295 295 srcrepo = srcpeer.local()
296 296 try:
297 297 abspath = origsource
298 298 if islocal(origsource):
299 299 abspath = os.path.abspath(util.urllocalpath(origsource))
300 300
301 301 if islocal(dest):
302 302 dircleanup = DirCleanup(dest)
303 303
304 304 copy = False
305 if srcpeer.cancopy() and islocal(dest) and not srcrepo.revs("secret()"):
305 if (srcrepo and srcrepo.cancopy() and islocal(dest)
306 and not srcrepo.revs("secret()")):
306 307 copy = not pull and not rev
307 308
308 309 if copy:
309 310 try:
310 311 # we use a lock here because if we race with commit, we
311 312 # can end up with extra data in the cloned revlogs that's
312 313 # not pointed to by changesets, thus causing verify to
313 314 # fail
314 315 srclock = srcrepo.lock(wait=False)
315 316 except error.LockError:
316 317 copy = False
317 318
318 319 if copy:
319 320 srcrepo.hook('preoutgoing', throw=True, source='clone')
320 321 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
321 322 if not os.path.exists(dest):
322 323 os.mkdir(dest)
323 324 else:
324 325 # only clean up directories we create ourselves
325 326 dircleanup.dir_ = hgdir
326 327 try:
327 328 destpath = hgdir
328 329 util.makedir(destpath, notindexed=True)
329 330 except OSError, inst:
330 331 if inst.errno == errno.EEXIST:
331 332 dircleanup.close()
332 333 raise util.Abort(_("destination '%s' already exists")
333 334 % dest)
334 335 raise
335 336
336 337 destlock = copystore(ui, srcrepo, destpath)
337 338
338 339 # we need to re-init the repo after manually copying the data
339 340 # into it
340 341 destpeer = peer(ui, peeropts, dest)
341 342 srcrepo.hook('outgoing', source='clone',
342 343 node=node.hex(node.nullid))
343 344 else:
344 345 try:
345 346 destpeer = peer(ui, peeropts, dest, create=True)
346 347 except OSError, inst:
347 348 if inst.errno == errno.EEXIST:
348 349 dircleanup.close()
349 350 raise util.Abort(_("destination '%s' already exists")
350 351 % dest)
351 352 raise
352 353
353 354 revs = None
354 355 if rev:
355 356 if not srcpeer.capable('lookup'):
356 357 raise util.Abort(_("src repository does not support "
357 358 "revision lookup and so doesn't "
358 359 "support clone by revision"))
359 360 revs = [srcpeer.lookup(r) for r in rev]
360 361 checkout = revs[0]
361 362 if destpeer.local():
362 363 destpeer.local().clone(srcpeer, heads=revs, stream=stream)
363 364 elif srcrepo:
364 365 srcrepo.push(destpeer, revs=revs)
365 366 else:
366 367 raise util.Abort(_("clone from remote to remote not supported"))
367 368
368 369 if dircleanup:
369 370 dircleanup.close()
370 371
371 372 # clone all bookmarks except divergent ones
372 373 destrepo = destpeer.local()
373 374 if destrepo and srcpeer.capable("pushkey"):
374 375 rb = srcpeer.listkeys('bookmarks')
375 376 for k, n in rb.iteritems():
376 377 try:
377 378 m = destrepo.lookup(n)
378 379 destrepo._bookmarks[k] = m
379 380 except error.RepoLookupError:
380 381 pass
381 382 if rb:
382 383 bookmarks.write(destrepo)
383 384 elif srcrepo and destpeer.capable("pushkey"):
384 385 for k, n in srcrepo._bookmarks.iteritems():
385 386 destpeer.pushkey('bookmarks', k, '', hex(n))
386 387
387 388 if destrepo:
388 389 fp = destrepo.opener("hgrc", "w", text=True)
389 390 fp.write("[paths]\n")
390 391 u = util.url(abspath)
391 392 u.passwd = None
392 393 defaulturl = str(u)
393 394 fp.write("default = %s\n" % defaulturl)
394 395 fp.close()
395 396
396 397 destrepo.ui.setconfig('paths', 'default', defaulturl)
397 398
398 399 if update:
399 400 if update is not True:
400 401 checkout = srcrepo.lookup(update)
401 402 for test in (checkout, 'default', 'tip'):
402 403 if test is None:
403 404 continue
404 405 try:
405 406 uprev = destrepo.lookup(test)
406 407 break
407 408 except error.RepoLookupError:
408 409 continue
409 410 bn = destrepo[uprev].branch()
410 411 destrepo.ui.status(_("updating to branch %s\n") % bn)
411 412 _update(destrepo, uprev)
412 413
413 414 return srcpeer, destpeer
414 415 finally:
415 416 release(srclock, destlock)
416 417 if dircleanup is not None:
417 418 dircleanup.cleanup()
418 419 if srcpeer is not None:
419 420 srcpeer.close()
420 421
421 422 def _showstats(repo, stats):
422 423 repo.ui.status(_("%d files updated, %d files merged, "
423 424 "%d files removed, %d files unresolved\n") % stats)
424 425
425 426 def update(repo, node):
426 427 """update the working directory to node, merging linear changes"""
427 428 stats = mergemod.update(repo, node, False, False, None)
428 429 _showstats(repo, stats)
429 430 if stats[3]:
430 431 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
431 432 return stats[3] > 0
432 433
433 434 # naming conflict in clone()
434 435 _update = update
435 436
436 437 def clean(repo, node, show_stats=True):
437 438 """forcibly switch the working directory to node, clobbering changes"""
438 439 stats = mergemod.update(repo, node, False, True, None)
439 440 if show_stats:
440 441 _showstats(repo, stats)
441 442 return stats[3] > 0
442 443
443 444 def merge(repo, node, force=None, remind=True):
444 445 """Branch merge with node, resolving changes. Return true if any
445 446 unresolved conflicts."""
446 447 stats = mergemod.update(repo, node, True, force, False)
447 448 _showstats(repo, stats)
448 449 if stats[3]:
449 450 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
450 451 "or 'hg update -C .' to abandon\n"))
451 452 elif remind:
452 453 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
453 454 return stats[3] > 0
454 455
455 456 def _incoming(displaychlist, subreporecurse, ui, repo, source,
456 457 opts, buffered=False):
457 458 """
458 459 Helper for incoming / gincoming.
459 460 displaychlist gets called with
460 461 (remoterepo, incomingchangesetlist, displayer) parameters,
461 462 and is supposed to contain only code that can't be unified.
462 463 """
463 464 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
464 465 other = peer(repo, opts, source)
465 466 ui.status(_('comparing with %s\n') % util.hidepassword(source))
466 467 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
467 468
468 469 if revs:
469 470 revs = [other.lookup(rev) for rev in revs]
470 471 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
471 472 revs, opts["bundle"], opts["force"])
472 473 try:
473 474 if not chlist:
474 475 ui.status(_("no changes found\n"))
475 476 return subreporecurse()
476 477
477 478 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
478 479
479 480 # XXX once graphlog extension makes it into core,
480 481 # should be replaced by a if graph/else
481 482 displaychlist(other, chlist, displayer)
482 483
483 484 displayer.close()
484 485 finally:
485 486 cleanupfn()
486 487 subreporecurse()
487 488 return 0 # exit code is zero since we found incoming changes
488 489
489 490 def incoming(ui, repo, source, opts):
490 491 def subreporecurse():
491 492 ret = 1
492 493 if opts.get('subrepos'):
493 494 ctx = repo[None]
494 495 for subpath in sorted(ctx.substate):
495 496 sub = ctx.sub(subpath)
496 497 ret = min(ret, sub.incoming(ui, source, opts))
497 498 return ret
498 499
499 500 def display(other, chlist, displayer):
500 501 limit = cmdutil.loglimit(opts)
501 502 if opts.get('newest_first'):
502 503 chlist.reverse()
503 504 count = 0
504 505 for n in chlist:
505 506 if limit is not None and count >= limit:
506 507 break
507 508 parents = [p for p in other.changelog.parents(n) if p != nullid]
508 509 if opts.get('no_merges') and len(parents) == 2:
509 510 continue
510 511 count += 1
511 512 displayer.show(other[n])
512 513 return _incoming(display, subreporecurse, ui, repo, source, opts)
513 514
514 515 def _outgoing(ui, repo, dest, opts):
515 516 dest = ui.expandpath(dest or 'default-push', dest or 'default')
516 517 dest, branches = parseurl(dest, opts.get('branch'))
517 518 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
518 519 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
519 520 if revs:
520 521 revs = [repo.lookup(rev) for rev in revs]
521 522
522 523 other = peer(repo, opts, dest)
523 524 outgoing = discovery.findcommonoutgoing(repo, other, revs,
524 525 force=opts.get('force'))
525 526 o = outgoing.missing
526 527 if not o:
527 528 scmutil.nochangesfound(repo.ui, outgoing.excluded)
528 529 return None
529 530 return o
530 531
531 532 def outgoing(ui, repo, dest, opts):
532 533 def recurse():
533 534 ret = 1
534 535 if opts.get('subrepos'):
535 536 ctx = repo[None]
536 537 for subpath in sorted(ctx.substate):
537 538 sub = ctx.sub(subpath)
538 539 ret = min(ret, sub.outgoing(ui, dest, opts))
539 540 return ret
540 541
541 542 limit = cmdutil.loglimit(opts)
542 543 o = _outgoing(ui, repo, dest, opts)
543 544 if o is None:
544 545 return recurse()
545 546
546 547 if opts.get('newest_first'):
547 548 o.reverse()
548 549 displayer = cmdutil.show_changeset(ui, repo, opts)
549 550 count = 0
550 551 for n in o:
551 552 if limit is not None and count >= limit:
552 553 break
553 554 parents = [p for p in repo.changelog.parents(n) if p != nullid]
554 555 if opts.get('no_merges') and len(parents) == 2:
555 556 continue
556 557 count += 1
557 558 displayer.show(repo[n])
558 559 displayer.close()
559 560 recurse()
560 561 return 0 # exit code is zero since we found outgoing changes
561 562
562 563 def revert(repo, node, choose):
563 564 """revert changes to revision in node without updating dirstate"""
564 565 return mergemod.update(repo, node, False, True, choose)[3] > 0
565 566
566 567 def verify(repo):
567 568 """verify the consistency of a repository"""
568 569 return verifymod.verify(repo)
569 570
570 571 def remoteui(src, opts):
571 572 'build a remote ui from ui or repo and opts'
572 573 if util.safehasattr(src, 'baseui'): # looks like a repository
573 574 dst = src.baseui.copy() # drop repo-specific config
574 575 src = src.ui # copy target options from repo
575 576 else: # assume it's a global ui object
576 577 dst = src.copy() # keep all global options
577 578
578 579 # copy ssh-specific options
579 580 for o in 'ssh', 'remotecmd':
580 581 v = opts.get(o) or src.config('ui', o)
581 582 if v:
582 583 dst.setconfig("ui", o, v)
583 584
584 585 # copy bundle-specific options
585 586 r = src.config('bundle', 'mainreporoot')
586 587 if r:
587 588 dst.setconfig('bundle', 'mainreporoot', r)
588 589
589 590 # copy selected local settings to the remote ui
590 591 for sect in ('auth', 'hostfingerprints', 'http_proxy'):
591 592 for key, val in src.configitems(sect):
592 593 dst.setconfig(sect, key, val)
593 594 v = src.config('web', 'cacerts')
594 595 if v:
595 596 dst.setconfig('web', 'cacerts', util.expandpath(v))
596 597
597 598 return dst
@@ -1,2571 +1,2568
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
26 26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
27 27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28 28
29 29 class localpeer(peer.peerrepository):
30 30 '''peer for a local repo; reflects only the most recent API'''
31 31
32 32 def __init__(self, repo, caps=MODERNCAPS):
33 33 peer.peerrepository.__init__(self)
34 34 self._repo = repo
35 35 self.ui = repo.ui
36 36 self._caps = repo._restrictcapabilities(caps)
37 37 self.requirements = repo.requirements
38 38 self.supportedformats = repo.supportedformats
39 39
40 40 def close(self):
41 41 self._repo.close()
42 42
43 43 def _capabilities(self):
44 44 return self._caps
45 45
46 46 def local(self):
47 47 return self._repo
48 48
49 def cancopy(self):
50 return self._repo.cancopy() # so bundlerepo can override
51
52 49 def canpush(self):
53 50 return True
54 51
55 52 def url(self):
56 53 return self._repo.url()
57 54
58 55 def lookup(self, key):
59 56 return self._repo.lookup(key)
60 57
61 58 def branchmap(self):
62 59 return self._repo.branchmap()
63 60
64 61 def heads(self):
65 62 return self._repo.heads()
66 63
67 64 def known(self, nodes):
68 65 return self._repo.known(nodes)
69 66
70 67 def getbundle(self, source, heads=None, common=None):
71 68 return self._repo.getbundle(source, heads=heads, common=common)
72 69
73 70 # TODO We might want to move the next two calls into legacypeer and add
74 71 # unbundle instead.
75 72
76 73 def lock(self):
77 74 return self._repo.lock()
78 75
79 76 def addchangegroup(self, cg, source, url):
80 77 return self._repo.addchangegroup(cg, source, url)
81 78
82 79 def pushkey(self, namespace, key, old, new):
83 80 return self._repo.pushkey(namespace, key, old, new)
84 81
85 82 def listkeys(self, namespace):
86 83 return self._repo.listkeys(namespace)
87 84
88 85 def debugwireargs(self, one, two, three=None, four=None, five=None):
89 86 '''used to test argument passing over the wire'''
90 87 return "%s %s %s %s %s" % (one, two, three, four, five)
91 88
92 89 class locallegacypeer(localpeer):
93 90 '''peer extension which implements legacy methods too; used for tests with
94 91 restricted capabilities'''
95 92
96 93 def __init__(self, repo):
97 94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
98 95
99 96 def branches(self, nodes):
100 97 return self._repo.branches(nodes)
101 98
102 99 def between(self, pairs):
103 100 return self._repo.between(pairs)
104 101
105 102 def changegroup(self, basenodes, source):
106 103 return self._repo.changegroup(basenodes, source)
107 104
108 105 def changegroupsubset(self, bases, heads, source):
109 106 return self._repo.changegroupsubset(bases, heads, source)
110 107
111 108 class localrepository(object):
112 109
113 110 supportedformats = set(('revlogv1', 'generaldelta'))
114 111 supported = supportedformats | set(('store', 'fncache', 'shared',
115 112 'dotencode'))
116 113 openerreqs = set(('revlogv1', 'generaldelta'))
117 114 requirements = ['revlogv1']
118 115
119 116 def _baserequirements(self, create):
120 117 return self.requirements[:]
121 118
122 119 def __init__(self, baseui, path=None, create=False):
123 120 self.wopener = scmutil.opener(path, expand=True)
124 121 self.wvfs = self.wopener
125 122 self.root = self.wvfs.base
126 123 self.path = self.wvfs.join(".hg")
127 124 self.origroot = path
128 125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
129 126 self.opener = scmutil.opener(self.path)
130 127 self.vfs = self.opener
131 128 self.baseui = baseui
132 129 self.ui = baseui.copy()
133 130 # A list of callbacks to shape the phase if no data were found.
134 131 # Callbacks are in the form: func(repo, roots) --> processed roots.
135 132 # This list is to be filled by extensions during repo setup.
136 133 self._phasedefaults = []
137 134
138 135 try:
139 136 self.ui.readconfig(self.join("hgrc"), self.root)
140 137 extensions.loadall(self.ui)
141 138 except IOError:
142 139 pass
143 140
144 141 if not self.vfs.isdir():
145 142 if create:
146 143 if not self.wvfs.exists():
147 144 self.wvfs.makedirs()
148 145 self.vfs.makedir(notindexed=True)
149 146 requirements = self._baserequirements(create)
150 147 if self.ui.configbool('format', 'usestore', True):
151 148 self.vfs.mkdir("store")
152 149 requirements.append("store")
153 150 if self.ui.configbool('format', 'usefncache', True):
154 151 requirements.append("fncache")
155 152 if self.ui.configbool('format', 'dotencode', True):
156 153 requirements.append('dotencode')
157 154 # create an invalid changelog
158 155 self.vfs.append(
159 156 "00changelog.i",
160 157 '\0\0\0\2' # represents revlogv2
161 158 ' dummy changelog to prevent using the old repo layout'
162 159 )
163 160 if self.ui.configbool('format', 'generaldelta', False):
164 161 requirements.append("generaldelta")
165 162 requirements = set(requirements)
166 163 else:
167 164 raise error.RepoError(_("repository %s not found") % path)
168 165 elif create:
169 166 raise error.RepoError(_("repository %s already exists") % path)
170 167 else:
171 168 try:
172 169 requirements = scmutil.readrequires(self.vfs, self.supported)
173 170 except IOError, inst:
174 171 if inst.errno != errno.ENOENT:
175 172 raise
176 173 requirements = set()
177 174
178 175 self.sharedpath = self.path
179 176 try:
180 177 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
181 178 if not os.path.exists(s):
182 179 raise error.RepoError(
183 180 _('.hg/sharedpath points to nonexistent directory %s') % s)
184 181 self.sharedpath = s
185 182 except IOError, inst:
186 183 if inst.errno != errno.ENOENT:
187 184 raise
188 185
189 186 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
190 187 self.spath = self.store.path
191 188 self.sopener = self.store.opener
192 189 self.svfs = self.sopener
193 190 self.sjoin = self.store.join
194 191 self.opener.createmode = self.store.createmode
195 192 self._applyrequirements(requirements)
196 193 if create:
197 194 self._writerequirements()
198 195
199 196
200 197 self._branchcache = None
201 198 self._branchcachetip = None
202 199 self.filterpats = {}
203 200 self._datafilters = {}
204 201 self._transref = self._lockref = self._wlockref = None
205 202
206 203 # A cache for various files under .hg/ that tracks file changes,
207 204 # (used by the filecache decorator)
208 205 #
209 206 # Maps a property name to its util.filecacheentry
210 207 self._filecache = {}
211 208
212 209 def close(self):
213 210 pass
214 211
215 212 def _restrictcapabilities(self, caps):
216 213 return caps
217 214
218 215 def _applyrequirements(self, requirements):
219 216 self.requirements = requirements
220 217 self.sopener.options = dict((r, 1) for r in requirements
221 218 if r in self.openerreqs)
222 219
223 220 def _writerequirements(self):
224 221 reqfile = self.opener("requires", "w")
225 222 for r in self.requirements:
226 223 reqfile.write("%s\n" % r)
227 224 reqfile.close()
228 225
229 226 def _checknested(self, path):
230 227 """Determine if path is a legal nested repository."""
231 228 if not path.startswith(self.root):
232 229 return False
233 230 subpath = path[len(self.root) + 1:]
234 231 normsubpath = util.pconvert(subpath)
235 232
236 233 # XXX: Checking against the current working copy is wrong in
237 234 # the sense that it can reject things like
238 235 #
239 236 # $ hg cat -r 10 sub/x.txt
240 237 #
241 238 # if sub/ is no longer a subrepository in the working copy
242 239 # parent revision.
243 240 #
244 241 # However, it can of course also allow things that would have
245 242 # been rejected before, such as the above cat command if sub/
246 243 # is a subrepository now, but was a normal directory before.
247 244 # The old path auditor would have rejected by mistake since it
248 245 # panics when it sees sub/.hg/.
249 246 #
250 247 # All in all, checking against the working copy seems sensible
251 248 # since we want to prevent access to nested repositories on
252 249 # the filesystem *now*.
253 250 ctx = self[None]
254 251 parts = util.splitpath(subpath)
255 252 while parts:
256 253 prefix = '/'.join(parts)
257 254 if prefix in ctx.substate:
258 255 if prefix == normsubpath:
259 256 return True
260 257 else:
261 258 sub = ctx.sub(prefix)
262 259 return sub.checknested(subpath[len(prefix) + 1:])
263 260 else:
264 261 parts.pop()
265 262 return False
266 263
267 264 def peer(self):
268 265 return localpeer(self) # not cached to avoid reference cycle
269 266
270 267 @filecache('bookmarks')
271 268 def _bookmarks(self):
272 269 return bookmarks.read(self)
273 270
274 271 @filecache('bookmarks.current')
275 272 def _bookmarkcurrent(self):
276 273 return bookmarks.readcurrent(self)
277 274
278 275 def _writebookmarks(self, marks):
279 276 bookmarks.write(self)
280 277
281 278 def bookmarkheads(self, bookmark):
282 279 name = bookmark.split('@', 1)[0]
283 280 heads = []
284 281 for mark, n in self._bookmarks.iteritems():
285 282 if mark.split('@', 1)[0] == name:
286 283 heads.append(n)
287 284 return heads
288 285
289 286 @storecache('phaseroots')
290 287 def _phasecache(self):
291 288 return phases.phasecache(self, self._phasedefaults)
292 289
293 290 @storecache('obsstore')
294 291 def obsstore(self):
295 292 store = obsolete.obsstore(self.sopener)
296 293 return store
297 294
298 295 @storecache('00changelog.i')
299 296 def changelog(self):
300 297 c = changelog.changelog(self.sopener)
301 298 if 'HG_PENDING' in os.environ:
302 299 p = os.environ['HG_PENDING']
303 300 if p.startswith(self.root):
304 301 c.readpending('00changelog.i.a')
305 302 return c
306 303
307 304 @storecache('00manifest.i')
308 305 def manifest(self):
309 306 return manifest.manifest(self.sopener)
310 307
311 308 @filecache('dirstate')
312 309 def dirstate(self):
313 310 warned = [0]
314 311 def validate(node):
315 312 try:
316 313 self.changelog.rev(node)
317 314 return node
318 315 except error.LookupError:
319 316 if not warned[0]:
320 317 warned[0] = True
321 318 self.ui.warn(_("warning: ignoring unknown"
322 319 " working parent %s!\n") % short(node))
323 320 return nullid
324 321
325 322 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
326 323
327 324 def __getitem__(self, changeid):
328 325 if changeid is None:
329 326 return context.workingctx(self)
330 327 return context.changectx(self, changeid)
331 328
332 329 def __contains__(self, changeid):
333 330 try:
334 331 return bool(self.lookup(changeid))
335 332 except error.RepoLookupError:
336 333 return False
337 334
338 335 def __nonzero__(self):
339 336 return True
340 337
341 338 def __len__(self):
342 339 return len(self.changelog)
343 340
344 341 def __iter__(self):
345 342 for i in xrange(len(self)):
346 343 yield i
347 344
348 345 def revs(self, expr, *args):
349 346 '''Return a list of revisions matching the given revset'''
350 347 expr = revset.formatspec(expr, *args)
351 348 m = revset.match(None, expr)
352 349 return [r for r in m(self, range(len(self)))]
353 350
354 351 def set(self, expr, *args):
355 352 '''
356 353 Yield a context for each matching revision, after doing arg
357 354 replacement via revset.formatspec
358 355 '''
359 356 for r in self.revs(expr, *args):
360 357 yield self[r]
361 358
362 359 def url(self):
363 360 return 'file:' + self.root
364 361
365 362 def hook(self, name, throw=False, **args):
366 363 return hook.hook(self.ui, self, name, throw, **args)
367 364
368 365 tag_disallowed = ':\r\n'
369 366
370 367 def _tag(self, names, node, message, local, user, date, extra={}):
371 368 if isinstance(names, str):
372 369 allchars = names
373 370 names = (names,)
374 371 else:
375 372 allchars = ''.join(names)
376 373 for c in self.tag_disallowed:
377 374 if c in allchars:
378 375 raise util.Abort(_('%r cannot be used in a tag name') % c)
379 376
380 377 branches = self.branchmap()
381 378 for name in names:
382 379 self.hook('pretag', throw=True, node=hex(node), tag=name,
383 380 local=local)
384 381 if name in branches:
385 382 self.ui.warn(_("warning: tag %s conflicts with existing"
386 383 " branch name\n") % name)
387 384
388 385 def writetags(fp, names, munge, prevtags):
389 386 fp.seek(0, 2)
390 387 if prevtags and prevtags[-1] != '\n':
391 388 fp.write('\n')
392 389 for name in names:
393 390 m = munge and munge(name) or name
394 391 if (self._tagscache.tagtypes and
395 392 name in self._tagscache.tagtypes):
396 393 old = self.tags().get(name, nullid)
397 394 fp.write('%s %s\n' % (hex(old), m))
398 395 fp.write('%s %s\n' % (hex(node), m))
399 396 fp.close()
400 397
401 398 prevtags = ''
402 399 if local:
403 400 try:
404 401 fp = self.opener('localtags', 'r+')
405 402 except IOError:
406 403 fp = self.opener('localtags', 'a')
407 404 else:
408 405 prevtags = fp.read()
409 406
410 407 # local tags are stored in the current charset
411 408 writetags(fp, names, None, prevtags)
412 409 for name in names:
413 410 self.hook('tag', node=hex(node), tag=name, local=local)
414 411 return
415 412
416 413 try:
417 414 fp = self.wfile('.hgtags', 'rb+')
418 415 except IOError, e:
419 416 if e.errno != errno.ENOENT:
420 417 raise
421 418 fp = self.wfile('.hgtags', 'ab')
422 419 else:
423 420 prevtags = fp.read()
424 421
425 422 # committed tags are stored in UTF-8
426 423 writetags(fp, names, encoding.fromlocal, prevtags)
427 424
428 425 fp.close()
429 426
430 427 self.invalidatecaches()
431 428
432 429 if '.hgtags' not in self.dirstate:
433 430 self[None].add(['.hgtags'])
434 431
435 432 m = matchmod.exact(self.root, '', ['.hgtags'])
436 433 tagnode = self.commit(message, user, date, extra=extra, match=m)
437 434
438 435 for name in names:
439 436 self.hook('tag', node=hex(node), tag=name, local=local)
440 437
441 438 return tagnode
442 439
443 440 def tag(self, names, node, message, local, user, date):
444 441 '''tag a revision with one or more symbolic names.
445 442
446 443 names is a list of strings or, when adding a single tag, names may be a
447 444 string.
448 445
449 446 if local is True, the tags are stored in a per-repository file.
450 447 otherwise, they are stored in the .hgtags file, and a new
451 448 changeset is committed with the change.
452 449
453 450 keyword arguments:
454 451
455 452 local: whether to store tags in non-version-controlled file
456 453 (default False)
457 454
458 455 message: commit message to use if committing
459 456
460 457 user: name of user to use if committing
461 458
462 459 date: date tuple to use if committing'''
463 460
464 461 if not local:
465 462 for x in self.status()[:5]:
466 463 if '.hgtags' in x:
467 464 raise util.Abort(_('working copy of .hgtags is changed '
468 465 '(please commit .hgtags manually)'))
469 466
470 467 self.tags() # instantiate the cache
471 468 self._tag(names, node, message, local, user, date)
472 469
473 470 @propertycache
474 471 def _tagscache(self):
475 472 '''Returns a tagscache object that contains various tags related
476 473 caches.'''
477 474
478 475 # This simplifies its cache management by having one decorated
479 476 # function (this one) and the rest simply fetch things from it.
480 477 class tagscache(object):
481 478 def __init__(self):
482 479 # These two define the set of tags for this repository. tags
483 480 # maps tag name to node; tagtypes maps tag name to 'global' or
484 481 # 'local'. (Global tags are defined by .hgtags across all
485 482 # heads, and local tags are defined in .hg/localtags.)
486 483 # They constitute the in-memory cache of tags.
487 484 self.tags = self.tagtypes = None
488 485
489 486 self.nodetagscache = self.tagslist = None
490 487
491 488 cache = tagscache()
492 489 cache.tags, cache.tagtypes = self._findtags()
493 490
494 491 return cache
495 492
496 493 def tags(self):
497 494 '''return a mapping of tag to node'''
498 495 t = {}
499 496 for k, v in self._tagscache.tags.iteritems():
500 497 try:
501 498 # ignore tags to unknown nodes
502 499 self.changelog.rev(v)
503 500 t[k] = v
504 501 except (error.LookupError, ValueError):
505 502 pass
506 503 return t
507 504
508 505 def _findtags(self):
509 506 '''Do the hard work of finding tags. Return a pair of dicts
510 507 (tags, tagtypes) where tags maps tag name to node, and tagtypes
511 508 maps tag name to a string like \'global\' or \'local\'.
512 509 Subclasses or extensions are free to add their own tags, but
513 510 should be aware that the returned dicts will be retained for the
514 511 duration of the localrepo object.'''
515 512
516 513 # XXX what tagtype should subclasses/extensions use? Currently
517 514 # mq and bookmarks add tags, but do not set the tagtype at all.
518 515 # Should each extension invent its own tag type? Should there
519 516 # be one tagtype for all such "virtual" tags? Or is the status
520 517 # quo fine?
521 518
522 519 alltags = {} # map tag name to (node, hist)
523 520 tagtypes = {}
524 521
525 522 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
526 523 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
527 524
528 525 # Build the return dicts. Have to re-encode tag names because
529 526 # the tags module always uses UTF-8 (in order not to lose info
530 527 # writing to the cache), but the rest of Mercurial wants them in
531 528 # local encoding.
532 529 tags = {}
533 530 for (name, (node, hist)) in alltags.iteritems():
534 531 if node != nullid:
535 532 tags[encoding.tolocal(name)] = node
536 533 tags['tip'] = self.changelog.tip()
537 534 tagtypes = dict([(encoding.tolocal(name), value)
538 535 for (name, value) in tagtypes.iteritems()])
539 536 return (tags, tagtypes)
540 537
541 538 def tagtype(self, tagname):
542 539 '''
543 540 return the type of the given tag. result can be:
544 541
545 542 'local' : a local tag
546 543 'global' : a global tag
547 544 None : tag does not exist
548 545 '''
549 546
550 547 return self._tagscache.tagtypes.get(tagname)
551 548
552 549 def tagslist(self):
553 550 '''return a list of tags ordered by revision'''
554 551 if not self._tagscache.tagslist:
555 552 l = []
556 553 for t, n in self.tags().iteritems():
557 554 r = self.changelog.rev(n)
558 555 l.append((r, t, n))
559 556 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
560 557
561 558 return self._tagscache.tagslist
562 559
563 560 def nodetags(self, node):
564 561 '''return the tags associated with a node'''
565 562 if not self._tagscache.nodetagscache:
566 563 nodetagscache = {}
567 564 for t, n in self._tagscache.tags.iteritems():
568 565 nodetagscache.setdefault(n, []).append(t)
569 566 for tags in nodetagscache.itervalues():
570 567 tags.sort()
571 568 self._tagscache.nodetagscache = nodetagscache
572 569 return self._tagscache.nodetagscache.get(node, [])
573 570
574 571 def nodebookmarks(self, node):
575 572 marks = []
576 573 for bookmark, n in self._bookmarks.iteritems():
577 574 if n == node:
578 575 marks.append(bookmark)
579 576 return sorted(marks)
580 577
581 578 def _branchtags(self, partial, lrev):
582 579 # TODO: rename this function?
583 580 tiprev = len(self) - 1
584 581 if lrev != tiprev:
585 582 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
586 583 self._updatebranchcache(partial, ctxgen)
587 584 self._writebranchcache(partial, self.changelog.tip(), tiprev)
588 585
589 586 return partial
590 587
591 588 def updatebranchcache(self):
592 589 tip = self.changelog.tip()
593 590 if self._branchcache is not None and self._branchcachetip == tip:
594 591 return
595 592
596 593 oldtip = self._branchcachetip
597 594 self._branchcachetip = tip
598 595 if oldtip is None or oldtip not in self.changelog.nodemap:
599 596 partial, last, lrev = self._readbranchcache()
600 597 else:
601 598 lrev = self.changelog.rev(oldtip)
602 599 partial = self._branchcache
603 600
604 601 self._branchtags(partial, lrev)
605 602 # this private cache holds all heads (not just the branch tips)
606 603 self._branchcache = partial
607 604
608 605 def branchmap(self):
609 606 '''returns a dictionary {branch: [branchheads]}'''
610 607 self.updatebranchcache()
611 608 return self._branchcache
612 609
613 610 def _branchtip(self, heads):
614 611 '''return the tipmost branch head in heads'''
615 612 tip = heads[-1]
616 613 for h in reversed(heads):
617 614 if not self[h].closesbranch():
618 615 tip = h
619 616 break
620 617 return tip
621 618
622 619 def branchtip(self, branch):
623 620 '''return the tip node for a given branch'''
624 621 if branch not in self.branchmap():
625 622 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
626 623 return self._branchtip(self.branchmap()[branch])
627 624
628 625 def branchtags(self):
629 626 '''return a dict where branch names map to the tipmost head of
630 627 the branch, open heads come before closed'''
631 628 bt = {}
632 629 for bn, heads in self.branchmap().iteritems():
633 630 bt[bn] = self._branchtip(heads)
634 631 return bt
635 632
636 633 def _readbranchcache(self):
637 634 partial = {}
638 635 try:
639 636 f = self.opener("cache/branchheads")
640 637 lines = f.read().split('\n')
641 638 f.close()
642 639 except (IOError, OSError):
643 640 return {}, nullid, nullrev
644 641
645 642 try:
646 643 last, lrev = lines.pop(0).split(" ", 1)
647 644 last, lrev = bin(last), int(lrev)
648 645 if lrev >= len(self) or self[lrev].node() != last:
649 646 # invalidate the cache
650 647 raise ValueError('invalidating branch cache (tip differs)')
651 648 for l in lines:
652 649 if not l:
653 650 continue
654 651 node, label = l.split(" ", 1)
655 652 label = encoding.tolocal(label.strip())
656 653 if not node in self:
657 654 raise ValueError('invalidating branch cache because node ' +
658 655 '%s does not exist' % node)
659 656 partial.setdefault(label, []).append(bin(node))
660 657 except KeyboardInterrupt:
661 658 raise
662 659 except Exception, inst:
663 660 if self.ui.debugflag:
664 661 self.ui.warn(str(inst), '\n')
665 662 partial, last, lrev = {}, nullid, nullrev
666 663 return partial, last, lrev
667 664
668 665 def _writebranchcache(self, branches, tip, tiprev):
669 666 try:
670 667 f = self.opener("cache/branchheads", "w", atomictemp=True)
671 668 f.write("%s %s\n" % (hex(tip), tiprev))
672 669 for label, nodes in branches.iteritems():
673 670 for node in nodes:
674 671 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
675 672 f.close()
676 673 except (IOError, OSError):
677 674 pass
678 675
679 676 def _updatebranchcache(self, partial, ctxgen):
680 677 """Given a branchhead cache, partial, that may have extra nodes or be
681 678 missing heads, and a generator of nodes that are at least a superset of
682 679 heads missing, this function updates partial to be correct.
683 680 """
684 681 # collect new branch entries
685 682 newbranches = {}
686 683 for c in ctxgen:
687 684 newbranches.setdefault(c.branch(), []).append(c.node())
688 685 # if older branchheads are reachable from new ones, they aren't
689 686 # really branchheads. Note checking parents is insufficient:
690 687 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
691 688 for branch, newnodes in newbranches.iteritems():
692 689 bheads = partial.setdefault(branch, [])
693 690 # Remove candidate heads that no longer are in the repo (e.g., as
694 691 # the result of a strip that just happened). Avoid using 'node in
695 692 # self' here because that dives down into branchcache code somewhat
696 693 # recursively.
697 694 bheadrevs = [self.changelog.rev(node) for node in bheads
698 695 if self.changelog.hasnode(node)]
699 696 newheadrevs = [self.changelog.rev(node) for node in newnodes
700 697 if self.changelog.hasnode(node)]
701 698 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
702 699 # Remove duplicates - nodes that are in newheadrevs and are already
703 700 # in bheadrevs. This can happen if you strip a node whose parent
704 701 # was already a head (because they're on different branches).
705 702 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
706 703
707 704 # Starting from tip means fewer passes over reachable. If we know
708 705 # the new candidates are not ancestors of existing heads, we don't
709 706 # have to examine ancestors of existing heads
710 707 if ctxisnew:
711 708 iterrevs = sorted(newheadrevs)
712 709 else:
713 710 iterrevs = list(bheadrevs)
714 711
715 712 # This loop prunes out two kinds of heads - heads that are
716 713 # superseded by a head in newheadrevs, and newheadrevs that are not
717 714 # heads because an existing head is their descendant.
718 715 while iterrevs:
719 716 latest = iterrevs.pop()
720 717 if latest not in bheadrevs:
721 718 continue
722 719 ancestors = set(self.changelog.ancestors([latest],
723 720 bheadrevs[0]))
724 721 if ancestors:
725 722 bheadrevs = [b for b in bheadrevs if b not in ancestors]
726 723 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
727 724
728 725 # There may be branches that cease to exist when the last commit in the
729 726 # branch was stripped. This code filters them out. Note that the
730 727 # branch that ceased to exist may not be in newbranches because
731 728 # newbranches is the set of candidate heads, which when you strip the
732 729 # last commit in a branch will be the parent branch.
733 730 for branch in partial:
734 731 nodes = [head for head in partial[branch]
735 732 if self.changelog.hasnode(head)]
736 733 if not nodes:
737 734 del partial[branch]
738 735
739 736 def lookup(self, key):
740 737 return self[key].node()
741 738
742 739 def lookupbranch(self, key, remote=None):
743 740 repo = remote or self
744 741 if key in repo.branchmap():
745 742 return key
746 743
747 744 repo = (remote and remote.local()) and remote or self
748 745 return repo[key].branch()
749 746
750 747 def known(self, nodes):
751 748 nm = self.changelog.nodemap
752 749 pc = self._phasecache
753 750 result = []
754 751 for n in nodes:
755 752 r = nm.get(n)
756 753 resp = not (r is None or pc.phase(self, r) >= phases.secret)
757 754 result.append(resp)
758 755 return result
759 756
760 757 def local(self):
761 758 return self
762 759
763 760 def cancopy(self):
764 761 return self.local() # so statichttprepo's override of local() works
765 762
766 763 def join(self, f):
767 764 return os.path.join(self.path, f)
768 765
769 766 def wjoin(self, f):
770 767 return os.path.join(self.root, f)
771 768
772 769 def file(self, f):
773 770 if f[0] == '/':
774 771 f = f[1:]
775 772 return filelog.filelog(self.sopener, f)
776 773
777 774 def changectx(self, changeid):
778 775 return self[changeid]
779 776
780 777 def parents(self, changeid=None):
781 778 '''get list of changectxs for parents of changeid'''
782 779 return self[changeid].parents()
783 780
784 781 def setparents(self, p1, p2=nullid):
785 782 copies = self.dirstate.setparents(p1, p2)
786 783 if copies:
787 784 # Adjust copy records, the dirstate cannot do it, it
788 785 # requires access to parents manifests. Preserve them
789 786 # only for entries added to first parent.
790 787 pctx = self[p1]
791 788 for f in copies:
792 789 if f not in pctx and copies[f] in pctx:
793 790 self.dirstate.copy(copies[f], f)
794 791
795 792 def filectx(self, path, changeid=None, fileid=None):
796 793 """changeid can be a changeset revision, node, or tag.
797 794 fileid can be a file revision or node."""
798 795 return context.filectx(self, path, changeid, fileid)
799 796
800 797 def getcwd(self):
801 798 return self.dirstate.getcwd()
802 799
803 800 def pathto(self, f, cwd=None):
804 801 return self.dirstate.pathto(f, cwd)
805 802
806 803 def wfile(self, f, mode='r'):
807 804 return self.wopener(f, mode)
808 805
809 806 def _link(self, f):
810 807 return os.path.islink(self.wjoin(f))
811 808
812 809 def _loadfilter(self, filter):
813 810 if filter not in self.filterpats:
814 811 l = []
815 812 for pat, cmd in self.ui.configitems(filter):
816 813 if cmd == '!':
817 814 continue
818 815 mf = matchmod.match(self.root, '', [pat])
819 816 fn = None
820 817 params = cmd
821 818 for name, filterfn in self._datafilters.iteritems():
822 819 if cmd.startswith(name):
823 820 fn = filterfn
824 821 params = cmd[len(name):].lstrip()
825 822 break
826 823 if not fn:
827 824 fn = lambda s, c, **kwargs: util.filter(s, c)
828 825 # Wrap old filters not supporting keyword arguments
829 826 if not inspect.getargspec(fn)[2]:
830 827 oldfn = fn
831 828 fn = lambda s, c, **kwargs: oldfn(s, c)
832 829 l.append((mf, fn, params))
833 830 self.filterpats[filter] = l
834 831 return self.filterpats[filter]
835 832
836 833 def _filter(self, filterpats, filename, data):
837 834 for mf, fn, cmd in filterpats:
838 835 if mf(filename):
839 836 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
840 837 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
841 838 break
842 839
843 840 return data
844 841
845 842 @propertycache
846 843 def _encodefilterpats(self):
847 844 return self._loadfilter('encode')
848 845
849 846 @propertycache
850 847 def _decodefilterpats(self):
851 848 return self._loadfilter('decode')
852 849
853 850 def adddatafilter(self, name, filter):
854 851 self._datafilters[name] = filter
855 852
856 853 def wread(self, filename):
857 854 if self._link(filename):
858 855 data = os.readlink(self.wjoin(filename))
859 856 else:
860 857 data = self.wopener.read(filename)
861 858 return self._filter(self._encodefilterpats, filename, data)
862 859
863 860 def wwrite(self, filename, data, flags):
864 861 data = self._filter(self._decodefilterpats, filename, data)
865 862 if 'l' in flags:
866 863 self.wopener.symlink(data, filename)
867 864 else:
868 865 self.wopener.write(filename, data)
869 866 if 'x' in flags:
870 867 util.setflags(self.wjoin(filename), False, True)
871 868
872 869 def wwritedata(self, filename, data):
873 870 return self._filter(self._decodefilterpats, filename, data)
874 871
875 872 def transaction(self, desc):
876 873 tr = self._transref and self._transref() or None
877 874 if tr and tr.running():
878 875 return tr.nest()
879 876
880 877 # abort here if the journal already exists
881 878 if os.path.exists(self.sjoin("journal")):
882 879 raise error.RepoError(
883 880 _("abandoned transaction found - run hg recover"))
884 881
885 882 self._writejournal(desc)
886 883 renames = [(x, undoname(x)) for x in self._journalfiles()]
887 884
888 885 tr = transaction.transaction(self.ui.warn, self.sopener,
889 886 self.sjoin("journal"),
890 887 aftertrans(renames),
891 888 self.store.createmode)
892 889 self._transref = weakref.ref(tr)
893 890 return tr
894 891
895 892 def _journalfiles(self):
896 893 return (self.sjoin('journal'), self.join('journal.dirstate'),
897 894 self.join('journal.branch'), self.join('journal.desc'),
898 895 self.join('journal.bookmarks'),
899 896 self.sjoin('journal.phaseroots'))
900 897
901 898 def undofiles(self):
902 899 return [undoname(x) for x in self._journalfiles()]
903 900
904 901 def _writejournal(self, desc):
905 902 self.opener.write("journal.dirstate",
906 903 self.opener.tryread("dirstate"))
907 904 self.opener.write("journal.branch",
908 905 encoding.fromlocal(self.dirstate.branch()))
909 906 self.opener.write("journal.desc",
910 907 "%d\n%s\n" % (len(self), desc))
911 908 self.opener.write("journal.bookmarks",
912 909 self.opener.tryread("bookmarks"))
913 910 self.sopener.write("journal.phaseroots",
914 911 self.sopener.tryread("phaseroots"))
915 912
916 913 def recover(self):
917 914 lock = self.lock()
918 915 try:
919 916 if os.path.exists(self.sjoin("journal")):
920 917 self.ui.status(_("rolling back interrupted transaction\n"))
921 918 transaction.rollback(self.sopener, self.sjoin("journal"),
922 919 self.ui.warn)
923 920 self.invalidate()
924 921 return True
925 922 else:
926 923 self.ui.warn(_("no interrupted transaction available\n"))
927 924 return False
928 925 finally:
929 926 lock.release()
930 927
931 928 def rollback(self, dryrun=False, force=False):
932 929 wlock = lock = None
933 930 try:
934 931 wlock = self.wlock()
935 932 lock = self.lock()
936 933 if os.path.exists(self.sjoin("undo")):
937 934 return self._rollback(dryrun, force)
938 935 else:
939 936 self.ui.warn(_("no rollback information available\n"))
940 937 return 1
941 938 finally:
942 939 release(lock, wlock)
943 940
944 941 def _rollback(self, dryrun, force):
945 942 ui = self.ui
946 943 try:
947 944 args = self.opener.read('undo.desc').splitlines()
948 945 (oldlen, desc, detail) = (int(args[0]), args[1], None)
949 946 if len(args) >= 3:
950 947 detail = args[2]
951 948 oldtip = oldlen - 1
952 949
953 950 if detail and ui.verbose:
954 951 msg = (_('repository tip rolled back to revision %s'
955 952 ' (undo %s: %s)\n')
956 953 % (oldtip, desc, detail))
957 954 else:
958 955 msg = (_('repository tip rolled back to revision %s'
959 956 ' (undo %s)\n')
960 957 % (oldtip, desc))
961 958 except IOError:
962 959 msg = _('rolling back unknown transaction\n')
963 960 desc = None
964 961
965 962 if not force and self['.'] != self['tip'] and desc == 'commit':
966 963 raise util.Abort(
967 964 _('rollback of last commit while not checked out '
968 965 'may lose data'), hint=_('use -f to force'))
969 966
970 967 ui.status(msg)
971 968 if dryrun:
972 969 return 0
973 970
974 971 parents = self.dirstate.parents()
975 972 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
976 973 if os.path.exists(self.join('undo.bookmarks')):
977 974 util.rename(self.join('undo.bookmarks'),
978 975 self.join('bookmarks'))
979 976 if os.path.exists(self.sjoin('undo.phaseroots')):
980 977 util.rename(self.sjoin('undo.phaseroots'),
981 978 self.sjoin('phaseroots'))
982 979 self.invalidate()
983 980
984 981 parentgone = (parents[0] not in self.changelog.nodemap or
985 982 parents[1] not in self.changelog.nodemap)
986 983 if parentgone:
987 984 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
988 985 try:
989 986 branch = self.opener.read('undo.branch')
990 987 self.dirstate.setbranch(branch)
991 988 except IOError:
992 989 ui.warn(_('named branch could not be reset: '
993 990 'current branch is still \'%s\'\n')
994 991 % self.dirstate.branch())
995 992
996 993 self.dirstate.invalidate()
997 994 parents = tuple([p.rev() for p in self.parents()])
998 995 if len(parents) > 1:
999 996 ui.status(_('working directory now based on '
1000 997 'revisions %d and %d\n') % parents)
1001 998 else:
1002 999 ui.status(_('working directory now based on '
1003 1000 'revision %d\n') % parents)
1004 1001 # TODO: if we know which new heads may result from this rollback, pass
1005 1002 # them to destroy(), which will prevent the branchhead cache from being
1006 1003 # invalidated.
1007 1004 self.destroyed()
1008 1005 return 0
1009 1006
1010 1007 def invalidatecaches(self):
1011 1008 def delcache(name):
1012 1009 try:
1013 1010 delattr(self, name)
1014 1011 except AttributeError:
1015 1012 pass
1016 1013
1017 1014 delcache('_tagscache')
1018 1015
1019 1016 self._branchcache = None # in UTF-8
1020 1017 self._branchcachetip = None
1021 1018
1022 1019 def invalidatedirstate(self):
1023 1020 '''Invalidates the dirstate, causing the next call to dirstate
1024 1021 to check if it was modified since the last time it was read,
1025 1022 rereading it if it has.
1026 1023
1027 1024 This is different from dirstate.invalidate() in that it doesn't
1028 1025 always reread the dirstate. Use dirstate.invalidate() if you want to
1029 1026 explicitly read the dirstate again (i.e. restoring it to a previous
1030 1027 known good state).'''
1031 1028 if 'dirstate' in self.__dict__:
1032 1029 for k in self.dirstate._filecache:
1033 1030 try:
1034 1031 delattr(self.dirstate, k)
1035 1032 except AttributeError:
1036 1033 pass
1037 1034 delattr(self, 'dirstate')
1038 1035
1039 1036 def invalidate(self):
1040 1037 for k in self._filecache:
1041 1038 # dirstate is invalidated separately in invalidatedirstate()
1042 1039 if k == 'dirstate':
1043 1040 continue
1044 1041
1045 1042 try:
1046 1043 delattr(self, k)
1047 1044 except AttributeError:
1048 1045 pass
1049 1046 self.invalidatecaches()
1050 1047
1051 1048 # Discard all cache entries to force reloading everything.
1052 1049 self._filecache.clear()
1053 1050
1054 1051 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1055 1052 try:
1056 1053 l = lock.lock(lockname, 0, releasefn, desc=desc)
1057 1054 except error.LockHeld, inst:
1058 1055 if not wait:
1059 1056 raise
1060 1057 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1061 1058 (desc, inst.locker))
1062 1059 # default to 600 seconds timeout
1063 1060 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1064 1061 releasefn, desc=desc)
1065 1062 if acquirefn:
1066 1063 acquirefn()
1067 1064 return l
1068 1065
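_lock above tries a non-blocking acquire first (timeout 0), warns about the current holder, then retries once with a bounded wait taken from ui.timeout (600 seconds by default). The same shape in isolation, with acquire and warn as stand-in callables rather than the real lock module:

class LockHeld(Exception):
    pass

def trylock(acquire, warn, timeout=600):
    try:
        return acquire(0)            # fail fast if somebody holds it
    except LockHeld:
        warn('waiting for lock\n')   # tell the user why we block
        return acquire(timeout)      # second try, bounded wait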
1069 1066 def _afterlock(self, callback):
1070 1067 """add a callback to the current repository lock.
1071 1068
1072 1069 The callback will be executed on lock release."""
1073 1070 l = self._lockref and self._lockref()
1074 1071 if l:
1075 1072 l.postrelease.append(callback)
1076 1073 else:
1077 1074 callback()
1078 1075
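_afterlock means "run this when the lock is finally released", degrading to an immediate call when no lock is held. A toy model of that contract; only the postrelease list mirrors the real lock object:

class ToyLock(object):
    def __init__(self):
        self.postrelease = []   # callbacks to fire on release
    def release(self):
        for cb in self.postrelease:
            cb()

events = []
l = ToyLock()
l.postrelease.append(lambda: events.append('commit hook'))
l.release()
assert events == ['commit hook']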
1079 1076 def lock(self, wait=True):
1080 1077 '''Lock the repository store (.hg/store) and return a weak reference
1081 1078 to the lock. Use this before modifying the store (e.g. committing or
1082 1079 stripping). If you are opening a transaction, get a lock as well.'''
1083 1080 l = self._lockref and self._lockref()
1084 1081 if l is not None and l.held:
1085 1082 l.lock()
1086 1083 return l
1087 1084
1088 1085 def unlock():
1089 1086 self.store.write()
1090 1087 if '_phasecache' in vars(self):
1091 1088 self._phasecache.write()
1092 1089 for k, ce in self._filecache.items():
1093 1090 if k == 'dirstate':
1094 1091 continue
1095 1092 ce.refresh()
1096 1093
1097 1094 l = self._lock(self.sjoin("lock"), wait, unlock,
1098 1095 self.invalidate, _('repository %s') % self.origroot)
1099 1096 self._lockref = weakref.ref(l)
1100 1097 return l
1101 1098
1102 1099 def wlock(self, wait=True):
1103 1100 '''Lock the non-store parts of the repository (everything under
1104 1101 .hg except .hg/store) and return a weak reference to the lock.
1105 1102 Use this before modifying files in .hg.'''
1106 1103 l = self._wlockref and self._wlockref()
1107 1104 if l is not None and l.held:
1108 1105 l.lock()
1109 1106 return l
1110 1107
1111 1108 def unlock():
1112 1109 self.dirstate.write()
1113 1110 ce = self._filecache.get('dirstate')
1114 1111 if ce:
1115 1112 ce.refresh()
1116 1113
1117 1114 l = self._lock(self.join("wlock"), wait, unlock,
1118 1115 self.invalidatedirstate, _('working directory of %s') %
1119 1116 self.origroot)
1120 1117 self._wlockref = weakref.ref(l)
1121 1118 return l
1122 1119
1123 1120 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1124 1121 """
1125 1122 commit an individual file as part of a larger transaction
1126 1123 """
1127 1124
1128 1125 fname = fctx.path()
1129 1126 text = fctx.data()
1130 1127 flog = self.file(fname)
1131 1128 fparent1 = manifest1.get(fname, nullid)
1132 1129 fparent2 = fparent2o = manifest2.get(fname, nullid)
1133 1130
1134 1131 meta = {}
1135 1132 copy = fctx.renamed()
1136 1133 if copy and copy[0] != fname:
1137 1134 # Mark the new revision of this file as a copy of another
1138 1135 # file. This copy data will effectively act as a parent
1139 1136 # of this new revision. If this is a merge, the first
1140 1137 # parent will be the nullid (meaning "look up the copy data")
1141 1138 # and the second one will be the other parent. For example:
1142 1139 #
1143 1140 # 0 --- 1 --- 3 rev1 changes file foo
1144 1141 # \ / rev2 renames foo to bar and changes it
1145 1142 # \- 2 -/ rev3 should have bar with all changes and
1146 1143 # should record that bar descends from
1147 1144 # bar in rev2 and foo in rev1
1148 1145 #
1149 1146 # this allows this merge to succeed:
1150 1147 #
1151 1148 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1152 1149 # \ / merging rev3 and rev4 should use bar@rev2
1153 1150 # \- 2 --- 4 as the merge base
1154 1151 #
1155 1152
1156 1153 cfname = copy[0]
1157 1154 crev = manifest1.get(cfname)
1158 1155 newfparent = fparent2
1159 1156
1160 1157 if manifest2: # branch merge
1161 1158 if fparent2 == nullid or crev is None: # copied on remote side
1162 1159 if cfname in manifest2:
1163 1160 crev = manifest2[cfname]
1164 1161 newfparent = fparent1
1165 1162
1166 1163 # find source in nearest ancestor if we've lost track
1167 1164 if not crev:
1168 1165 self.ui.debug(" %s: searching for copy revision for %s\n" %
1169 1166 (fname, cfname))
1170 1167 for ancestor in self[None].ancestors():
1171 1168 if cfname in ancestor:
1172 1169 crev = ancestor[cfname].filenode()
1173 1170 break
1174 1171
1175 1172 if crev:
1176 1173 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1177 1174 meta["copy"] = cfname
1178 1175 meta["copyrev"] = hex(crev)
1179 1176 fparent1, fparent2 = nullid, newfparent
1180 1177 else:
1181 1178 self.ui.warn(_("warning: can't find ancestor for '%s' "
1182 1179 "copied from '%s'!\n") % (fname, cfname))
1183 1180
1184 1181 elif fparent2 != nullid:
1185 1182 # is one parent an ancestor of the other?
1186 1183 fparentancestor = flog.ancestor(fparent1, fparent2)
1187 1184 if fparentancestor == fparent1:
1188 1185 fparent1, fparent2 = fparent2, nullid
1189 1186 elif fparentancestor == fparent2:
1190 1187 fparent2 = nullid
1191 1188
1192 1189 # is the file changed?
1193 1190 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1194 1191 changelist.append(fname)
1195 1192 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1196 1193
1197 1194 # are just the flags changed during merge?
1198 1195 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1199 1196 changelist.append(fname)
1200 1197
1201 1198 return fparent1
1202 1199
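When _filecommit records a rename, the copy source goes into revlog metadata and fparent1 becomes nullid, the "look up the copy data" signal described in the comment above. The shape of that metadata, with placeholder values standing in for the real file name and filenode:

meta = {
    'copy': 'foo',         # file this revision was renamed from
    'copyrev': '11' * 20,  # hex filenode of the source (fake value)
}
# the entry is then stored with parents (nullid, newfparent)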
1203 1200 def commit(self, text="", user=None, date=None, match=None, force=False,
1204 1201 editor=False, extra={}):
1205 1202 """Add a new revision to current repository.
1206 1203
1207 1204 Revision information is gathered from the working directory,
1208 1205 match can be used to filter the committed files. If editor is
1209 1206 supplied, it is called to get a commit message.
1210 1207 """
1211 1208
1212 1209 def fail(f, msg):
1213 1210 raise util.Abort('%s: %s' % (f, msg))
1214 1211
1215 1212 if not match:
1216 1213 match = matchmod.always(self.root, '')
1217 1214
1218 1215 if not force:
1219 1216 vdirs = []
1220 1217 match.dir = vdirs.append
1221 1218 match.bad = fail
1222 1219
1223 1220 wlock = self.wlock()
1224 1221 try:
1225 1222 wctx = self[None]
1226 1223 merge = len(wctx.parents()) > 1
1227 1224
1228 1225 if (not force and merge and match and
1229 1226 (match.files() or match.anypats())):
1230 1227 raise util.Abort(_('cannot partially commit a merge '
1231 1228 '(do not specify files or patterns)'))
1232 1229
1233 1230 changes = self.status(match=match, clean=force)
1234 1231 if force:
1235 1232 changes[0].extend(changes[6]) # mq may commit unchanged files
1236 1233
1237 1234 # check subrepos
1238 1235 subs = []
1239 1236 commitsubs = set()
1240 1237 newstate = wctx.substate.copy()
1241 1238 # only manage subrepos and .hgsubstate if .hgsub is present
1242 1239 if '.hgsub' in wctx:
1243 1240 # we'll decide whether to track this ourselves, thanks
1244 1241 if '.hgsubstate' in changes[0]:
1245 1242 changes[0].remove('.hgsubstate')
1246 1243 if '.hgsubstate' in changes[2]:
1247 1244 changes[2].remove('.hgsubstate')
1248 1245
1249 1246 # compare current state to last committed state
1250 1247 # build new substate based on last committed state
1251 1248 oldstate = wctx.p1().substate
1252 1249 for s in sorted(newstate.keys()):
1253 1250 if not match(s):
1254 1251 # ignore working copy, use old state if present
1255 1252 if s in oldstate:
1256 1253 newstate[s] = oldstate[s]
1257 1254 continue
1258 1255 if not force:
1259 1256 raise util.Abort(
1260 1257 _("commit with new subrepo %s excluded") % s)
1261 1258 if wctx.sub(s).dirty(True):
1262 1259 if not self.ui.configbool('ui', 'commitsubrepos'):
1263 1260 raise util.Abort(
1264 1261 _("uncommitted changes in subrepo %s") % s,
1265 1262 hint=_("use --subrepos for recursive commit"))
1266 1263 subs.append(s)
1267 1264 commitsubs.add(s)
1268 1265 else:
1269 1266 bs = wctx.sub(s).basestate()
1270 1267 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1271 1268 if oldstate.get(s, (None, None, None))[1] != bs:
1272 1269 subs.append(s)
1273 1270
1274 1271 # check for removed subrepos
1275 1272 for p in wctx.parents():
1276 1273 r = [s for s in p.substate if s not in newstate]
1277 1274 subs += [s for s in r if match(s)]
1278 1275 if subs:
1279 1276 if (not match('.hgsub') and
1280 1277 '.hgsub' in (wctx.modified() + wctx.added())):
1281 1278 raise util.Abort(
1282 1279 _("can't commit subrepos without .hgsub"))
1283 1280 changes[0].insert(0, '.hgsubstate')
1284 1281
1285 1282 elif '.hgsub' in changes[2]:
1286 1283 # clean up .hgsubstate when .hgsub is removed
1287 1284 if ('.hgsubstate' in wctx and
1288 1285 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1289 1286 changes[2].insert(0, '.hgsubstate')
1290 1287
1291 1288 # make sure all explicit patterns are matched
1292 1289 if not force and match.files():
1293 1290 matched = set(changes[0] + changes[1] + changes[2])
1294 1291
1295 1292 for f in match.files():
1296 1293 if f == '.' or f in matched or f in wctx.substate:
1297 1294 continue
1298 1295 if f in changes[3]: # missing
1299 1296 fail(f, _('file not found!'))
1300 1297 if f in vdirs: # visited directory
1301 1298 d = f + '/'
1302 1299 for mf in matched:
1303 1300 if mf.startswith(d):
1304 1301 break
1305 1302 else:
1306 1303 fail(f, _("no match under directory!"))
1307 1304 elif f not in self.dirstate:
1308 1305 fail(f, _("file not tracked!"))
1309 1306
1310 1307 if (not force and not extra.get("close") and not merge
1311 1308 and not (changes[0] or changes[1] or changes[2])
1312 1309 and wctx.branch() == wctx.p1().branch()):
1313 1310 return None
1314 1311
1315 1312 if merge and changes[3]:
1316 1313 raise util.Abort(_("cannot commit merge with missing files"))
1317 1314
1318 1315 ms = mergemod.mergestate(self)
1319 1316 for f in changes[0]:
1320 1317 if f in ms and ms[f] == 'u':
1321 1318 raise util.Abort(_("unresolved merge conflicts "
1322 1319 "(see hg help resolve)"))
1323 1320
1324 1321 cctx = context.workingctx(self, text, user, date, extra, changes)
1325 1322 if editor:
1326 1323 cctx._text = editor(self, cctx, subs)
1327 1324 edited = (text != cctx._text)
1328 1325
1329 1326 # commit subs and write new state
1330 1327 if subs:
1331 1328 for s in sorted(commitsubs):
1332 1329 sub = wctx.sub(s)
1333 1330 self.ui.status(_('committing subrepository %s\n') %
1334 1331 subrepo.subrelpath(sub))
1335 1332 sr = sub.commit(cctx._text, user, date)
1336 1333 newstate[s] = (newstate[s][0], sr)
1337 1334 subrepo.writestate(self, newstate)
1338 1335
1339 1336 # Save commit message in case this transaction gets rolled back
1340 1337 # (e.g. by a pretxncommit hook). Leave the content alone on
1341 1338 # the assumption that the user will use the same editor again.
1342 1339 msgfn = self.savecommitmessage(cctx._text)
1343 1340
1344 1341 p1, p2 = self.dirstate.parents()
1345 1342 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1346 1343 try:
1347 1344 self.hook("precommit", throw=True, parent1=hookp1,
1348 1345 parent2=hookp2)
1349 1346 ret = self.commitctx(cctx, True)
1350 1347 except: # re-raises
1351 1348 if edited:
1352 1349 self.ui.write(
1353 1350 _('note: commit message saved in %s\n') % msgfn)
1354 1351 raise
1355 1352
1356 1353 # update bookmarks, dirstate and mergestate
1357 1354 bookmarks.update(self, [p1, p2], ret)
1358 1355 for f in changes[0] + changes[1]:
1359 1356 self.dirstate.normal(f)
1360 1357 for f in changes[2]:
1361 1358 self.dirstate.drop(f)
1362 1359 self.dirstate.setparents(ret)
1363 1360 ms.reset()
1364 1361 finally:
1365 1362 wlock.release()
1366 1363
1367 1364 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1368 1365 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1369 1366 self._afterlock(commithook)
1370 1367 return ret
1371 1368
1372 1369 def commitctx(self, ctx, error=False):
1373 1370 """Add a new revision to current repository.
1374 1371 Revision information is passed via the context argument.
1375 1372 """
1376 1373
1377 1374 tr = lock = None
1378 1375 removed = list(ctx.removed())
1379 1376 p1, p2 = ctx.p1(), ctx.p2()
1380 1377 user = ctx.user()
1381 1378
1382 1379 lock = self.lock()
1383 1380 try:
1384 1381 tr = self.transaction("commit")
1385 1382 trp = weakref.proxy(tr)
1386 1383
1387 1384 if ctx.files():
1388 1385 m1 = p1.manifest().copy()
1389 1386 m2 = p2.manifest()
1390 1387
1391 1388 # check in files
1392 1389 new = {}
1393 1390 changed = []
1394 1391 linkrev = len(self)
1395 1392 for f in sorted(ctx.modified() + ctx.added()):
1396 1393 self.ui.note(f + "\n")
1397 1394 try:
1398 1395 fctx = ctx[f]
1399 1396 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1400 1397 changed)
1401 1398 m1.set(f, fctx.flags())
1402 1399 except OSError, inst:
1403 1400 self.ui.warn(_("trouble committing %s!\n") % f)
1404 1401 raise
1405 1402 except IOError, inst:
1406 1403 errcode = getattr(inst, 'errno', errno.ENOENT)
1407 1404 if error or errcode and errcode != errno.ENOENT:
1408 1405 self.ui.warn(_("trouble committing %s!\n") % f)
1409 1406 raise
1410 1407 else:
1411 1408 removed.append(f)
1412 1409
1413 1410 # update manifest
1414 1411 m1.update(new)
1415 1412 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1416 1413 drop = [f for f in removed if f in m1]
1417 1414 for f in drop:
1418 1415 del m1[f]
1419 1416 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1420 1417 p2.manifestnode(), (new, drop))
1421 1418 files = changed + removed
1422 1419 else:
1423 1420 mn = p1.manifestnode()
1424 1421 files = []
1425 1422
1426 1423 # update changelog
1427 1424 self.changelog.delayupdate()
1428 1425 n = self.changelog.add(mn, files, ctx.description(),
1429 1426 trp, p1.node(), p2.node(),
1430 1427 user, ctx.date(), ctx.extra().copy())
1431 1428 p = lambda: self.changelog.writepending() and self.root or ""
1432 1429 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1433 1430 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1434 1431 parent2=xp2, pending=p)
1435 1432 self.changelog.finalize(trp)
1436 1433 # set the new commit in its proper phase
1437 1434 targetphase = phases.newcommitphase(self.ui)
1438 1435 if targetphase:
1439 1436 # retracting the boundary does not alter parent changesets.
1440 1437 # if a parent has a higher phase, the resulting phase will
1441 1438 # be compliant anyway
1442 1439 #
1443 1440 # if minimal phase was 0 we don't need to retract anything
1444 1441 phases.retractboundary(self, targetphase, [n])
1445 1442 tr.close()
1446 1443 self.updatebranchcache()
1447 1444 return n
1448 1445 finally:
1449 1446 if tr:
1450 1447 tr.release()
1451 1448 lock.release()
1452 1449
1453 1450 def destroyed(self, newheadnodes=None):
1454 1451 '''Inform the repository that nodes have been destroyed.
1455 1452 Intended for use by strip and rollback, so there's a common
1456 1453 place for anything that has to be done after destroying history.
1457 1454
1458 1455 If you know the branchhead cache was up to date before nodes were removed
1459 1456 and you also know the set of candidate new heads that may have resulted
1460 1457 from the destruction, you can set newheadnodes. This will enable the
1461 1458 code to update the branchheads cache, rather than having future code
1462 1459 decide it's invalid and regenerate it from scratch.
1463 1460 '''
1464 1461 # If we have info (newheadnodes) on how to update the branch cache, do
1465 1462 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1466 1463 # will be caught the next time it is read.
1467 1464 if newheadnodes:
1468 1465 tiprev = len(self) - 1
1469 1466 ctxgen = (self[node] for node in newheadnodes
1470 1467 if self.changelog.hasnode(node))
1471 1468 self._updatebranchcache(self._branchcache, ctxgen)
1472 1469 self._writebranchcache(self._branchcache, self.changelog.tip(),
1473 1470 tiprev)
1474 1471
1475 1472 # Ensure the persistent tag cache is updated. Doing it now
1476 1473 # means that the tag cache only has to worry about destroyed
1477 1474 # heads immediately after a strip/rollback. That in turn
1478 1475 # guarantees that "cachetip == currenttip" (comparing both rev
1479 1476 # and node) always means no nodes have been added or destroyed.
1480 1477
1481 1478 # XXX this is suboptimal when qrefresh'ing: we strip the current
1482 1479 # head, refresh the tag cache, then immediately add a new head.
1483 1480 # But I think doing it this way is necessary for the "instant
1484 1481 # tag cache retrieval" case to work.
1485 1482 self.invalidatecaches()
1486 1483
1487 1484 def walk(self, match, node=None):
1488 1485 '''
1489 1486 walk recursively through the directory tree or a given
1490 1487 changeset, finding all files matched by the match
1491 1488 function
1492 1489 '''
1493 1490 return self[node].walk(match)
1494 1491
1495 1492 def status(self, node1='.', node2=None, match=None,
1496 1493 ignored=False, clean=False, unknown=False,
1497 1494 listsubrepos=False):
1498 1495 """return status of files between two nodes or node and working
1499 1496 directory.
1500 1497
1501 1498 If node1 is None, use the first dirstate parent instead.
1502 1499 If node2 is None, compare node1 with working directory.
1503 1500 """
1504 1501
1505 1502 def mfmatches(ctx):
1506 1503 mf = ctx.manifest().copy()
1507 1504 if match.always():
1508 1505 return mf
1509 1506 for fn in mf.keys():
1510 1507 if not match(fn):
1511 1508 del mf[fn]
1512 1509 return mf
1513 1510
1514 1511 if isinstance(node1, context.changectx):
1515 1512 ctx1 = node1
1516 1513 else:
1517 1514 ctx1 = self[node1]
1518 1515 if isinstance(node2, context.changectx):
1519 1516 ctx2 = node2
1520 1517 else:
1521 1518 ctx2 = self[node2]
1522 1519
1523 1520 working = ctx2.rev() is None
1524 1521 parentworking = working and ctx1 == self['.']
1525 1522 match = match or matchmod.always(self.root, self.getcwd())
1526 1523 listignored, listclean, listunknown = ignored, clean, unknown
1527 1524
1528 1525 # load earliest manifest first for caching reasons
1529 1526 if not working and ctx2.rev() < ctx1.rev():
1530 1527 ctx2.manifest()
1531 1528
1532 1529 if not parentworking:
1533 1530 def bad(f, msg):
1534 1531 # 'f' may be a directory pattern from 'match.files()',
1535 1532 # so 'f not in ctx1' is not enough
1536 1533 if f not in ctx1 and f not in ctx1.dirs():
1537 1534 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1538 1535 match.bad = bad
1539 1536
1540 1537 if working: # we need to scan the working dir
1541 1538 subrepos = []
1542 1539 if '.hgsub' in self.dirstate:
1543 1540 subrepos = ctx2.substate.keys()
1544 1541 s = self.dirstate.status(match, subrepos, listignored,
1545 1542 listclean, listunknown)
1546 1543 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1547 1544
1548 1545 # check for any possibly clean files
1549 1546 if parentworking and cmp:
1550 1547 fixup = []
1551 1548 # do a full compare of any files that might have changed
1552 1549 for f in sorted(cmp):
1553 1550 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1554 1551 or ctx1[f].cmp(ctx2[f])):
1555 1552 modified.append(f)
1556 1553 else:
1557 1554 fixup.append(f)
1558 1555
1559 1556 # update dirstate for files that are actually clean
1560 1557 if fixup:
1561 1558 if listclean:
1562 1559 clean += fixup
1563 1560
1564 1561 try:
1565 1562 # updating the dirstate is optional
1566 1563 # so we don't wait on the lock
1567 1564 wlock = self.wlock(False)
1568 1565 try:
1569 1566 for f in fixup:
1570 1567 self.dirstate.normal(f)
1571 1568 finally:
1572 1569 wlock.release()
1573 1570 except error.LockError:
1574 1571 pass
1575 1572
1576 1573 if not parentworking:
1577 1574 mf1 = mfmatches(ctx1)
1578 1575 if working:
1579 1576 # we are comparing working dir against non-parent
1580 1577 # generate a pseudo-manifest for the working dir
1581 1578 mf2 = mfmatches(self['.'])
1582 1579 for f in cmp + modified + added:
1583 1580 mf2[f] = None
1584 1581 mf2.set(f, ctx2.flags(f))
1585 1582 for f in removed:
1586 1583 if f in mf2:
1587 1584 del mf2[f]
1588 1585 else:
1589 1586 # we are comparing two revisions
1590 1587 deleted, unknown, ignored = [], [], []
1591 1588 mf2 = mfmatches(ctx2)
1592 1589
1593 1590 modified, added, clean = [], [], []
1594 1591 withflags = mf1.withflags() | mf2.withflags()
1595 1592 for fn in mf2:
1596 1593 if fn in mf1:
1597 1594 if (fn not in deleted and
1598 1595 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1599 1596 (mf1[fn] != mf2[fn] and
1600 1597 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1601 1598 modified.append(fn)
1602 1599 elif listclean:
1603 1600 clean.append(fn)
1604 1601 del mf1[fn]
1605 1602 elif fn not in deleted:
1606 1603 added.append(fn)
1607 1604 removed = mf1.keys()
1608 1605
1609 1606 if working and modified and not self.dirstate._checklink:
1610 1607 # Symlink placeholders may get non-symlink-like contents
1611 1608 # via user error or dereferencing by NFS or Samba servers,
1612 1609 # so we filter out any placeholders that don't look like a
1613 1610 # symlink
1614 1611 sane = []
1615 1612 for f in modified:
1616 1613 if ctx2.flags(f) == 'l':
1617 1614 d = ctx2[f].data()
1618 1615 if len(d) >= 1024 or '\n' in d or util.binary(d):
1619 1616 self.ui.debug('ignoring suspect symlink placeholder'
1620 1617 ' "%s"\n' % f)
1621 1618 continue
1622 1619 sane.append(f)
1623 1620 modified = sane
1624 1621
1625 1622 r = modified, added, removed, deleted, unknown, ignored, clean
1626 1623
1627 1624 if listsubrepos:
1628 1625 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1629 1626 if working:
1630 1627 rev2 = None
1631 1628 else:
1632 1629 rev2 = ctx2.substate[subpath][1]
1633 1630 try:
1634 1631 submatch = matchmod.narrowmatcher(subpath, match)
1635 1632 s = sub.status(rev2, match=submatch, ignored=listignored,
1636 1633 clean=listclean, unknown=listunknown,
1637 1634 listsubrepos=True)
1638 1635 for rfiles, sfiles in zip(r, s):
1639 1636 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1640 1637 except error.LookupError:
1641 1638 self.ui.status(_("skipping missing subrepository: %s\n")
1642 1639 % subpath)
1643 1640
1644 1641 for l in r:
1645 1642 l.sort()
1646 1643 return r
1647 1644
1648 1645 def heads(self, start=None):
1649 1646 heads = self.changelog.heads(start)
1650 1647 # sort the output in rev descending order
1651 1648 return sorted(heads, key=self.changelog.rev, reverse=True)
1652 1649
1653 1650 def branchheads(self, branch=None, start=None, closed=False):
1654 1651 '''return a (possibly filtered) list of heads for the given branch
1655 1652
1656 1653 Heads are returned in topological order, from newest to oldest.
1657 1654 If branch is None, use the dirstate branch.
1658 1655 If start is not None, return only heads reachable from start.
1659 1656 If closed is True, return heads that are marked as closed as well.
1660 1657 '''
1661 1658 if branch is None:
1662 1659 branch = self[None].branch()
1663 1660 branches = self.branchmap()
1664 1661 if branch not in branches:
1665 1662 return []
1666 1663 # the cache returns heads ordered lowest to highest
1667 1664 bheads = list(reversed(branches[branch]))
1668 1665 if start is not None:
1669 1666 # filter out the heads that cannot be reached from startrev
1670 1667 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1671 1668 bheads = [h for h in bheads if h in fbheads]
1672 1669 if not closed:
1673 1670 bheads = [h for h in bheads if not self[h].closesbranch()]
1674 1671 return bheads
1675 1672
1676 1673 def branches(self, nodes):
1677 1674 if not nodes:
1678 1675 nodes = [self.changelog.tip()]
1679 1676 b = []
1680 1677 for n in nodes:
1681 1678 t = n
1682 1679 while True:
1683 1680 p = self.changelog.parents(n)
1684 1681 if p[1] != nullid or p[0] == nullid:
1685 1682 b.append((t, n, p[0], p[1]))
1686 1683 break
1687 1684 n = p[0]
1688 1685 return b
1689 1686
1690 1687 def between(self, pairs):
1691 1688 r = []
1692 1689
1693 1690 for top, bottom in pairs:
1694 1691 n, l, i = top, [], 0
1695 1692 f = 1
1696 1693
1697 1694 while n != bottom and n != nullid:
1698 1695 p = self.changelog.parents(n)[0]
1699 1696 if i == f:
1700 1697 l.append(n)
1701 1698 f = f * 2
1702 1699 n = p
1703 1700 i += 1
1704 1701
1705 1702 r.append(l)
1706 1703
1707 1704 return r
1708 1705
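between() keeps nodes at exponentially growing distances from each top (positions 1, 2, 4, 8, ... along the first-parent chain), so the reply is a logarithmic sample that the old discovery protocol can bisect. The same selection applied to a plain list, as a sketch:

def sample(chain):
    # keep elements at distance 1, 2, 4, 8, ... from the start
    picked, f, i = [], 1, 0
    for n in chain:
        if i == f:
            picked.append(n)
            f *= 2
        i += 1
    return picked

assert sample(list(range(10))) == [1, 2, 4, 8]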
1709 1706 def pull(self, remote, heads=None, force=False):
1710 1707 # don't open a transaction for nothing, or you break future useful
1711 1708 # rollback calls
1712 1709 tr = None
1713 1710 trname = 'pull\n' + util.hidepassword(remote.url())
1714 1711 lock = self.lock()
1715 1712 try:
1716 1713 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1717 1714 force=force)
1718 1715 common, fetch, rheads = tmp
1719 1716 if not fetch:
1720 1717 self.ui.status(_("no changes found\n"))
1721 1718 added = []
1722 1719 result = 0
1723 1720 else:
1724 1721 tr = self.transaction(trname)
1725 1722 if heads is None and list(common) == [nullid]:
1726 1723 self.ui.status(_("requesting all changes\n"))
1727 1724 elif heads is None and remote.capable('changegroupsubset'):
1728 1725 # issue1320, avoid a race if remote changed after discovery
1729 1726 heads = rheads
1730 1727
1731 1728 if remote.capable('getbundle'):
1732 1729 cg = remote.getbundle('pull', common=common,
1733 1730 heads=heads or rheads)
1734 1731 elif heads is None:
1735 1732 cg = remote.changegroup(fetch, 'pull')
1736 1733 elif not remote.capable('changegroupsubset'):
1737 1734 raise util.Abort(_("partial pull cannot be done because "
1738 1735 "other repository doesn't support "
1739 1736 "changegroupsubset."))
1740 1737 else:
1741 1738 cg = remote.changegroupsubset(fetch, heads, 'pull')
1742 1739 clstart = len(self.changelog)
1743 1740 result = self.addchangegroup(cg, 'pull', remote.url())
1744 1741 clend = len(self.changelog)
1745 1742 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1746 1743
1747 1744 # compute target subset
1748 1745 if heads is None:
1749 1746 # We pulled everything possible
1750 1747 # sync on everything common
1751 1748 subset = common + added
1752 1749 else:
1753 1750 # We pulled a specific subset
1754 1751 # sync on this subset
1755 1752 subset = heads
1756 1753
1757 1754 # Get remote phases data from remote
1758 1755 remotephases = remote.listkeys('phases')
1759 1756 publishing = bool(remotephases.get('publishing', False))
1760 1757 if remotephases and not publishing:
1761 1758 # remote is new and non-publishing
1762 1759 pheads, _dr = phases.analyzeremotephases(self, subset,
1763 1760 remotephases)
1764 1761 phases.advanceboundary(self, phases.public, pheads)
1765 1762 phases.advanceboundary(self, phases.draft, subset)
1766 1763 else:
1767 1764 # Remote is old or publishing; all common changesets
1768 1765 # should be seen as public
1769 1766 phases.advanceboundary(self, phases.public, subset)
1770 1767
1771 1768 remoteobs = remote.listkeys('obsolete')
1772 1769 if 'dump' in remoteobs:
1773 1770 if tr is None:
1774 1771 tr = self.transaction(trname)
1775 1772 data = base85.b85decode(remoteobs['dump'])
1776 1773 self.obsstore.mergemarkers(tr, data)
1777 1774 if tr is not None:
1778 1775 tr.close()
1779 1776 finally:
1780 1777 if tr is not None:
1781 1778 tr.release()
1782 1779 lock.release()
1783 1780
1784 1781 return result
1785 1782
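After the changegroup is applied, pull moves phase boundaries according to the remote's listkeys('phases') reply: an empty reply (old server) or publishing=True makes the whole synchronized subset public, while a non-publishing remote only publishes the analyzed public heads and keeps the rest draft. A compressed sketch of that decision; phasesafterpull and advance are made-up names:

public, draft = 0, 1

def phasesafterpull(remotephases, subset, pheads, advance):
    if remotephases and not remotephases.get('publishing', False):
        advance(public, pheads)  # only remote-public heads go public
        advance(draft, subset)   # the rest of the subset stays draft
    else:
        advance(public, subset)  # old or publishing server: all public

moves = []
phasesafterpull({}, ['n1'], [], lambda p, ns: moves.append((p, ns)))
assert moves == [(public, ['n1'])]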
1786 1783 def checkpush(self, force, revs):
1787 1784 """Extensions can override this function if additional checks have
1788 1785 to be performed before pushing, or call it if they override push
1789 1786 command.
1790 1787 """
1791 1788 pass
1792 1789
1793 1790 def push(self, remote, force=False, revs=None, newbranch=False):
1794 1791 '''Push outgoing changesets (limited by revs) from the current
1795 1792 repository to remote. Return an integer:
1796 1793 - None means nothing to push
1797 1794 - 0 means HTTP error
1798 1795 - 1 means we pushed and remote head count is unchanged *or*
1799 1796 we have outgoing changesets but refused to push
1800 1797 - other values as described by addchangegroup()
1801 1798 '''
1802 1799 # there are two ways to push to remote repo:
1803 1800 #
1804 1801 # addchangegroup assumes local user can lock remote
1805 1802 # repo (local filesystem, old ssh servers).
1806 1803 #
1807 1804 # unbundle assumes local user cannot lock remote repo (new ssh
1808 1805 # servers, http servers).
1809 1806
1810 1807 if not remote.canpush():
1811 1808 raise util.Abort(_("destination does not support push"))
1812 1809 # get local lock as we might write phase data
1813 1810 locallock = self.lock()
1814 1811 try:
1815 1812 self.checkpush(force, revs)
1816 1813 lock = None
1817 1814 unbundle = remote.capable('unbundle')
1818 1815 if not unbundle:
1819 1816 lock = remote.lock()
1820 1817 try:
1821 1818 # discovery
1822 1819 fci = discovery.findcommonincoming
1823 1820 commoninc = fci(self, remote, force=force)
1824 1821 common, inc, remoteheads = commoninc
1825 1822 fco = discovery.findcommonoutgoing
1826 1823 outgoing = fco(self, remote, onlyheads=revs,
1827 1824 commoninc=commoninc, force=force)
1828 1825
1829 1826
1830 1827 if not outgoing.missing:
1831 1828 # nothing to push
1832 1829 scmutil.nochangesfound(self.ui, outgoing.excluded)
1833 1830 ret = None
1834 1831 else:
1835 1832 # something to push
1836 1833 if not force:
1837 1834 # if self.obsstore is empty --> there are no obsolete markers,
1838 1835 # so we can skip the iteration entirely
1839 1836 if self.obsstore:
1840 1837 # these messages are defined here to stay within the 80-char limit
1841 1838 mso = _("push includes an obsolete changeset: %s!")
1842 1839 msu = _("push includes an unstable changeset: %s!")
1843 1840 # If we are about to push and there is at least one
1844 1841 # obsolete or unstable changeset in missing, then at
1845 1842 # least one of the missing heads will be obsolete or
1846 1843 # unstable. So checking heads only is ok
1847 1844 for node in outgoing.missingheads:
1848 1845 ctx = self[node]
1849 1846 if ctx.obsolete():
1850 1847 raise util.Abort(_(mso) % ctx)
1851 1848 elif ctx.unstable():
1852 1849 raise util.Abort(_(msu) % ctx)
1853 1850 discovery.checkheads(self, remote, outgoing,
1854 1851 remoteheads, newbranch,
1855 1852 bool(inc))
1856 1853
1857 1854 # create a changegroup from local
1858 1855 if revs is None and not outgoing.excluded:
1859 1856 # push everything,
1860 1857 # use the fast path, no race possible on push
1861 1858 cg = self._changegroup(outgoing.missing, 'push')
1862 1859 else:
1863 1860 cg = self.getlocalbundle('push', outgoing)
1864 1861
1865 1862 # apply changegroup to remote
1866 1863 if unbundle:
1867 1864 # local repo finds heads on server, finds out what
1868 1865 # revs it must push. once revs transferred, if server
1869 1866 # finds it has different heads (someone else won
1870 1867 # commit/push race), server aborts.
1871 1868 if force:
1872 1869 remoteheads = ['force']
1873 1870 # ssh: return remote's addchangegroup()
1874 1871 # http: return remote's addchangegroup() or 0 for error
1875 1872 ret = remote.unbundle(cg, remoteheads, 'push')
1876 1873 else:
1877 1874 # we return an integer indicating remote head count
1878 1875 # change
1879 1876 ret = remote.addchangegroup(cg, 'push', self.url())
1880 1877
1881 1878 if ret:
1882 1879 # push succeeded, synchronize the target of the push
1883 1880 cheads = outgoing.missingheads
1884 1881 elif revs is None:
1885 1882 # All-out push failed; synchronize all common
1886 1883 cheads = outgoing.commonheads
1887 1884 else:
1888 1885 # I want cheads = heads(::missingheads and ::commonheads)
1889 1886 # (missingheads is revs with secret changeset filtered out)
1890 1887 #
1891 1888 # This can be expressed as:
1892 1889 # cheads = ( (missingheads and ::commonheads)
1893 1890 # + (commonheads and ::missingheads))"
1894 1891 # )
1895 1892 #
1896 1893 # while trying to push we already computed the following:
1897 1894 # common = (::commonheads)
1898 1895 # missing = ((commonheads::missingheads) - commonheads)
1899 1896 #
1900 1897 # We can pick:
1901 1898 # * missingheads part of common (::commonheads)
1902 1899 common = set(outgoing.common)
1903 1900 cheads = [node for node in revs if node in common]
1904 1901 # and
1905 1902 # * commonheads parents on missing
1906 1903 revset = self.set('%ln and parents(roots(%ln))',
1907 1904 outgoing.commonheads,
1908 1905 outgoing.missing)
1909 1906 cheads.extend(c.node() for c in revset)
1910 1907 # even when we don't push, exchanging phase data is useful
1911 1908 remotephases = remote.listkeys('phases')
1912 1909 if not remotephases: # old server or public only repo
1913 1910 phases.advanceboundary(self, phases.public, cheads)
1914 1911 # don't push any phase data as there is nothing to push
1915 1912 else:
1916 1913 ana = phases.analyzeremotephases(self, cheads, remotephases)
1917 1914 pheads, droots = ana
1918 1915 ### Apply remote phase on local
1919 1916 if remotephases.get('publishing', False):
1920 1917 phases.advanceboundary(self, phases.public, cheads)
1921 1918 else: # publish = False
1922 1919 phases.advanceboundary(self, phases.public, pheads)
1923 1920 phases.advanceboundary(self, phases.draft, cheads)
1924 1921 ### Apply local phase on remote
1925 1922
1926 1923 # Get the list of all revs draft on remote by public here.
1927 1924 # XXX Beware that the revset breaks if droots is not strictly
1928 1925 # XXX roots; we may want to ensure it is, but that is costly
1929 1926 outdated = self.set('heads((%ln::%ln) and public())',
1930 1927 droots, cheads)
1931 1928 for newremotehead in outdated:
1932 1929 r = remote.pushkey('phases',
1933 1930 newremotehead.hex(),
1934 1931 str(phases.draft),
1935 1932 str(phases.public))
1936 1933 if not r:
1937 1934 self.ui.warn(_('updating %s to public failed!\n')
1938 1935 % newremotehead)
1939 1936 if ('obsolete' in remote.listkeys('namespaces')
1940 1937 and self.obsstore):
1941 1938 data = self.listkeys('obsolete')['dump']
1942 1939 r = remote.pushkey('obsolete', 'dump', '', data)
1943 1940 if not r:
1944 1941 self.ui.warn(_('failed to push obsolete markers!\n'))
1945 1942 finally:
1946 1943 if lock is not None:
1947 1944 lock.release()
1948 1945 finally:
1949 1946 locallock.release()
1950 1947
1951 1948 self.ui.debug("checking for updated bookmarks\n")
1952 1949 rb = remote.listkeys('bookmarks')
1953 1950 for k in rb.keys():
1954 1951 if k in self._bookmarks:
1955 1952 nr, nl = rb[k], hex(self._bookmarks[k])
1956 1953 if nr in self:
1957 1954 cr = self[nr]
1958 1955 cl = self[nl]
1959 1956 if cl in cr.descendants():
1960 1957 r = remote.pushkey('bookmarks', k, nr, nl)
1961 1958 if r:
1962 1959 self.ui.status(_("updating bookmark %s\n") % k)
1963 1960 else:
1964 1961 self.ui.warn(_('updating bookmark %s'
1965 1962 ' failed!\n') % k)
1966 1963
1967 1964 return ret
1968 1965
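The integers documented in push()'s docstring combine the HTTP/unbundle outcome with addchangegroup()'s head-delta encoding. A hedged helper, not part of any API, that spells out the mapping:

def describepushresult(ret):
    if ret is None:
        return 'nothing to push'
    if ret == 0:
        return 'HTTP error'
    if ret == 1:
        return 'pushed; head count unchanged, or push was refused'
    # other values follow the addchangegroup() encoding: dh+1 / dh-1
    return 'pushed; remote head delta %+d' % (ret - 1 if ret > 0 else ret + 1)

assert describepushresult(3) == 'pushed; remote head delta +2'
assert describepushresult(-2) == 'pushed; remote head delta -1'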
1969 1966 def changegroupinfo(self, nodes, source):
1970 1967 if self.ui.verbose or source == 'bundle':
1971 1968 self.ui.status(_("%d changesets found\n") % len(nodes))
1972 1969 if self.ui.debugflag:
1973 1970 self.ui.debug("list of changesets:\n")
1974 1971 for node in nodes:
1975 1972 self.ui.debug("%s\n" % hex(node))
1976 1973
1977 1974 def changegroupsubset(self, bases, heads, source):
1978 1975 """Compute a changegroup consisting of all the nodes that are
1979 1976 descendants of any of the bases and ancestors of any of the heads.
1980 1977 Return a chunkbuffer object whose read() method will return
1981 1978 successive changegroup chunks.
1982 1979
1983 1980 It is fairly complex as determining which filenodes and which
1984 1981 manifest nodes need to be included for the changeset to be complete
1985 1982 is non-trivial.
1986 1983
1987 1984 Another wrinkle is doing the reverse, figuring out which changeset in
1988 1985 the changegroup a particular filenode or manifestnode belongs to.
1989 1986 """
1990 1987 cl = self.changelog
1991 1988 if not bases:
1992 1989 bases = [nullid]
1993 1990 csets, bases, heads = cl.nodesbetween(bases, heads)
1994 1991 # We assume that all ancestors of bases are known
1995 1992 common = set(cl.ancestors([cl.rev(n) for n in bases]))
1996 1993 return self._changegroupsubset(common, csets, heads, source)
1997 1994
1998 1995 def getlocalbundle(self, source, outgoing):
1999 1996 """Like getbundle, but taking a discovery.outgoing as an argument.
2000 1997
2001 1998 This is only implemented for local repos and reuses potentially
2002 1999 precomputed sets in outgoing."""
2003 2000 if not outgoing.missing:
2004 2001 return None
2005 2002 return self._changegroupsubset(outgoing.common,
2006 2003 outgoing.missing,
2007 2004 outgoing.missingheads,
2008 2005 source)
2009 2006
2010 2007 def getbundle(self, source, heads=None, common=None):
2011 2008 """Like changegroupsubset, but returns the set difference between the
2012 2009 ancestors of heads and the ancestors common.
2013 2010
2014 2011 If heads is None, use the local heads. If common is None, use [nullid].
2015 2012
2016 2013 The nodes in common might not all be known locally due to the way the
2017 2014 current discovery protocol works.
2018 2015 """
2019 2016 cl = self.changelog
2020 2017 if common:
2021 2018 nm = cl.nodemap
2022 2019 common = [n for n in common if n in nm]
2023 2020 else:
2024 2021 common = [nullid]
2025 2022 if not heads:
2026 2023 heads = cl.heads()
2027 2024 return self.getlocalbundle(source,
2028 2025 discovery.outgoing(cl, common, heads))
2029 2026
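getbundle's contract is a set difference over ancestry: everything reachable from heads that is not reachable from common (both sets taken inclusively). On a toy {node: parents} map, that is:

def ancestors(dag, roots):
    seen, stack = set(), list(roots)
    while stack:
        n = stack.pop()
        if n not in seen:
            seen.add(n)
            stack.extend(dag.get(n, ()))
    return seen

dag = {'a': (), 'b': ('a',), 'c': ('b',), 'd': ('c',)}
missing = ancestors(dag, ['d']) - ancestors(dag, ['b'])
assert missing == set(['c', 'd'])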
2030 2027 def _changegroupsubset(self, commonrevs, csets, heads, source):
2031 2028
2032 2029 cl = self.changelog
2033 2030 mf = self.manifest
2034 2031 mfs = {} # needed manifests
2035 2032 fnodes = {} # needed file nodes
2036 2033 changedfiles = set()
2037 2034 fstate = ['', {}]
2038 2035 count = [0, 0]
2039 2036
2040 2037 # can we go through the fast path ?
2041 2038 heads.sort()
2042 2039 if heads == sorted(self.heads()):
2043 2040 return self._changegroup(csets, source)
2044 2041
2045 2042 # slow path
2046 2043 self.hook('preoutgoing', throw=True, source=source)
2047 2044 self.changegroupinfo(csets, source)
2048 2045
2049 2046 # filter any nodes that claim to be part of the known set
2050 2047 def prune(revlog, missing):
2051 2048 rr, rl = revlog.rev, revlog.linkrev
2052 2049 return [n for n in missing
2053 2050 if rl(rr(n)) not in commonrevs]
2054 2051
2055 2052 progress = self.ui.progress
2056 2053 _bundling = _('bundling')
2057 2054 _changesets = _('changesets')
2058 2055 _manifests = _('manifests')
2059 2056 _files = _('files')
2060 2057
2061 2058 def lookup(revlog, x):
2062 2059 if revlog == cl:
2063 2060 c = cl.read(x)
2064 2061 changedfiles.update(c[3])
2065 2062 mfs.setdefault(c[0], x)
2066 2063 count[0] += 1
2067 2064 progress(_bundling, count[0],
2068 2065 unit=_changesets, total=count[1])
2069 2066 return x
2070 2067 elif revlog == mf:
2071 2068 clnode = mfs[x]
2072 2069 mdata = mf.readfast(x)
2073 2070 for f, n in mdata.iteritems():
2074 2071 if f in changedfiles:
2075 2072 fnodes[f].setdefault(n, clnode)
2076 2073 count[0] += 1
2077 2074 progress(_bundling, count[0],
2078 2075 unit=_manifests, total=count[1])
2079 2076 return clnode
2080 2077 else:
2081 2078 progress(_bundling, count[0], item=fstate[0],
2082 2079 unit=_files, total=count[1])
2083 2080 return fstate[1][x]
2084 2081
2085 2082 bundler = changegroup.bundle10(lookup)
2086 2083 reorder = self.ui.config('bundle', 'reorder', 'auto')
2087 2084 if reorder == 'auto':
2088 2085 reorder = None
2089 2086 else:
2090 2087 reorder = util.parsebool(reorder)
2091 2088
2092 2089 def gengroup():
2093 2090 # Create a changenode group generator that will call our functions
2094 2091 # back to lookup the owning changenode and collect information.
2095 2092 count[:] = [0, len(csets)]
2096 2093 for chunk in cl.group(csets, bundler, reorder=reorder):
2097 2094 yield chunk
2098 2095 progress(_bundling, None)
2099 2096
2100 2097 # Create a generator for the manifestnodes that calls our lookup
2101 2098 # and data collection functions back.
2102 2099 for f in changedfiles:
2103 2100 fnodes[f] = {}
2104 2101 count[:] = [0, len(mfs)]
2105 2102 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2106 2103 yield chunk
2107 2104 progress(_bundling, None)
2108 2105
2109 2106 mfs.clear()
2110 2107
2111 2108 # Go through all our files in order sorted by name.
2112 2109 count[:] = [0, len(changedfiles)]
2113 2110 for fname in sorted(changedfiles):
2114 2111 filerevlog = self.file(fname)
2115 2112 if not len(filerevlog):
2116 2113 raise util.Abort(_("empty or missing revlog for %s")
2117 2114 % fname)
2118 2115 fstate[0] = fname
2119 2116 fstate[1] = fnodes.pop(fname, {})
2120 2117
2121 2118 nodelist = prune(filerevlog, fstate[1])
2122 2119 if nodelist:
2123 2120 count[0] += 1
2124 2121 yield bundler.fileheader(fname)
2125 2122 for chunk in filerevlog.group(nodelist, bundler, reorder):
2126 2123 yield chunk
2127 2124
2128 2125 # Signal that no more groups are left.
2129 2126 yield bundler.close()
2130 2127 progress(_bundling, None)
2131 2128
2132 2129 if csets:
2133 2130 self.hook('outgoing', node=hex(csets[0]), source=source)
2134 2131
2135 2132 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2136 2133
2137 2134 def changegroup(self, basenodes, source):
2138 2135 # to avoid a race we use changegroupsubset() (issue1320)
2139 2136 return self.changegroupsubset(basenodes, self.heads(), source)
2140 2137
2141 2138 def _changegroup(self, nodes, source):
2142 2139 """Compute the changegroup of all nodes that we have that a recipient
2143 2140 doesn't. Return a chunkbuffer object whose read() method will return
2144 2141 successive changegroup chunks.
2145 2142
2146 2143 This is much easier than the previous function as we can assume that
2147 2144 the recipient has any changenode we aren't sending them.
2148 2145
2149 2146 nodes is the set of nodes to send"""
2150 2147
2151 2148 cl = self.changelog
2152 2149 mf = self.manifest
2153 2150 mfs = {}
2154 2151 changedfiles = set()
2155 2152 fstate = ['']
2156 2153 count = [0, 0]
2157 2154
2158 2155 self.hook('preoutgoing', throw=True, source=source)
2159 2156 self.changegroupinfo(nodes, source)
2160 2157
2161 2158 revset = set([cl.rev(n) for n in nodes])
2162 2159
2163 2160 def gennodelst(log):
2164 2161 ln, llr = log.node, log.linkrev
2165 2162 return [ln(r) for r in log if llr(r) in revset]
2166 2163
2167 2164 progress = self.ui.progress
2168 2165 _bundling = _('bundling')
2169 2166 _changesets = _('changesets')
2170 2167 _manifests = _('manifests')
2171 2168 _files = _('files')
2172 2169
2173 2170 def lookup(revlog, x):
2174 2171 if revlog == cl:
2175 2172 c = cl.read(x)
2176 2173 changedfiles.update(c[3])
2177 2174 mfs.setdefault(c[0], x)
2178 2175 count[0] += 1
2179 2176 progress(_bundling, count[0],
2180 2177 unit=_changesets, total=count[1])
2181 2178 return x
2182 2179 elif revlog == mf:
2183 2180 count[0] += 1
2184 2181 progress(_bundling, count[0],
2185 2182 unit=_manifests, total=count[1])
2186 2183 return cl.node(revlog.linkrev(revlog.rev(x)))
2187 2184 else:
2188 2185 progress(_bundling, count[0], item=fstate[0],
2189 2186 total=count[1], unit=_files)
2190 2187 return cl.node(revlog.linkrev(revlog.rev(x)))
2191 2188
2192 2189 bundler = changegroup.bundle10(lookup)
2193 2190 reorder = self.ui.config('bundle', 'reorder', 'auto')
2194 2191 if reorder == 'auto':
2195 2192 reorder = None
2196 2193 else:
2197 2194 reorder = util.parsebool(reorder)
2198 2195
2199 2196 def gengroup():
2200 2197 '''yield a sequence of changegroup chunks (strings)'''
2201 2198 # construct a list of all changed files
2202 2199
2203 2200 count[:] = [0, len(nodes)]
2204 2201 for chunk in cl.group(nodes, bundler, reorder=reorder):
2205 2202 yield chunk
2206 2203 progress(_bundling, None)
2207 2204
2208 2205 count[:] = [0, len(mfs)]
2209 2206 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2210 2207 yield chunk
2211 2208 progress(_bundling, None)
2212 2209
2213 2210 count[:] = [0, len(changedfiles)]
2214 2211 for fname in sorted(changedfiles):
2215 2212 filerevlog = self.file(fname)
2216 2213 if not len(filerevlog):
2217 2214 raise util.Abort(_("empty or missing revlog for %s")
2218 2215 % fname)
2219 2216 fstate[0] = fname
2220 2217 nodelist = gennodelst(filerevlog)
2221 2218 if nodelist:
2222 2219 count[0] += 1
2223 2220 yield bundler.fileheader(fname)
2224 2221 for chunk in filerevlog.group(nodelist, bundler, reorder):
2225 2222 yield chunk
2226 2223 yield bundler.close()
2227 2224 progress(_bundling, None)
2228 2225
2229 2226 if nodes:
2230 2227 self.hook('outgoing', node=hex(nodes[0]), source=source)
2231 2228
2232 2229 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2233 2230
2234 2231 def addchangegroup(self, source, srctype, url, emptyok=False):
2235 2232 """Add the changegroup returned by source.read() to this repo.
2236 2233 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2237 2234 the URL of the repo where this changegroup is coming from.
2238 2235
2239 2236 Return an integer summarizing the change to this repo:
2240 2237 - nothing changed or no source: 0
2241 2238 - more heads than before: 1+added heads (2..n)
2242 2239 - fewer heads than before: -1-removed heads (-2..-n)
2243 2240 - number of heads stays the same: 1
2244 2241 """
2245 2242 def csmap(x):
2246 2243 self.ui.debug("add changeset %s\n" % short(x))
2247 2244 return len(cl)
2248 2245
2249 2246 def revmap(x):
2250 2247 return cl.rev(x)
2251 2248
2252 2249 if not source:
2253 2250 return 0
2254 2251
2255 2252 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2256 2253
2257 2254 changesets = files = revisions = 0
2258 2255 efiles = set()
2259 2256
2260 2257 # write changelog data to temp files so concurrent readers will not see
2261 2258 # an inconsistent view
2262 2259 cl = self.changelog
2263 2260 cl.delayupdate()
2264 2261 oldheads = cl.heads()
2265 2262
2266 2263 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2267 2264 try:
2268 2265 trp = weakref.proxy(tr)
2269 2266 # pull off the changeset group
2270 2267 self.ui.status(_("adding changesets\n"))
2271 2268 clstart = len(cl)
2272 2269 class prog(object):
2273 2270 step = _('changesets')
2274 2271 count = 1
2275 2272 ui = self.ui
2276 2273 total = None
2277 2274 def __call__(self):
2278 2275 self.ui.progress(self.step, self.count, unit=_('chunks'),
2279 2276 total=self.total)
2280 2277 self.count += 1
2281 2278 pr = prog()
2282 2279 source.callback = pr
2283 2280
2284 2281 source.changelogheader()
2285 2282 srccontent = cl.addgroup(source, csmap, trp)
2286 2283 if not (srccontent or emptyok):
2287 2284 raise util.Abort(_("received changelog group is empty"))
2288 2285 clend = len(cl)
2289 2286 changesets = clend - clstart
2290 2287 for c in xrange(clstart, clend):
2291 2288 efiles.update(self[c].files())
2292 2289 efiles = len(efiles)
2293 2290 self.ui.progress(_('changesets'), None)
2294 2291
2295 2292 # pull off the manifest group
2296 2293 self.ui.status(_("adding manifests\n"))
2297 2294 pr.step = _('manifests')
2298 2295 pr.count = 1
2299 2296 pr.total = changesets # manifests <= changesets
2300 2297 # no need to check for empty manifest group here:
2301 2298 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2302 2299 # no new manifest will be created and the manifest group will
2303 2300 # be empty during the pull
2304 2301 source.manifestheader()
2305 2302 self.manifest.addgroup(source, revmap, trp)
2306 2303 self.ui.progress(_('manifests'), None)
2307 2304
2308 2305 needfiles = {}
2309 2306 if self.ui.configbool('server', 'validate', default=False):
2310 2307 # validate incoming csets have their manifests
2311 2308 for cset in xrange(clstart, clend):
2312 2309 mfest = self.changelog.read(self.changelog.node(cset))[0]
2313 2310 mfest = self.manifest.readdelta(mfest)
2314 2311 # store file nodes we must see
2315 2312 for f, n in mfest.iteritems():
2316 2313 needfiles.setdefault(f, set()).add(n)
2317 2314
2318 2315 # process the files
2319 2316 self.ui.status(_("adding file changes\n"))
2320 2317 pr.step = _('files')
2321 2318 pr.count = 1
2322 2319 pr.total = efiles
2323 2320 source.callback = None
2324 2321
2325 2322 while True:
2326 2323 chunkdata = source.filelogheader()
2327 2324 if not chunkdata:
2328 2325 break
2329 2326 f = chunkdata["filename"]
2330 2327 self.ui.debug("adding %s revisions\n" % f)
2331 2328 pr()
2332 2329 fl = self.file(f)
2333 2330 o = len(fl)
2334 2331 if not fl.addgroup(source, revmap, trp):
2335 2332 raise util.Abort(_("received file revlog group is empty"))
2336 2333 revisions += len(fl) - o
2337 2334 files += 1
2338 2335 if f in needfiles:
2339 2336 needs = needfiles[f]
2340 2337 for new in xrange(o, len(fl)):
2341 2338 n = fl.node(new)
2342 2339 if n in needs:
2343 2340 needs.remove(n)
2344 2341 if not needs:
2345 2342 del needfiles[f]
2346 2343 self.ui.progress(_('files'), None)
2347 2344
2348 2345 for f, needs in needfiles.iteritems():
2349 2346 fl = self.file(f)
2350 2347 for n in needs:
2351 2348 try:
2352 2349 fl.rev(n)
2353 2350 except error.LookupError:
2354 2351 raise util.Abort(
2355 2352 _('missing file data for %s:%s - run hg verify') %
2356 2353 (f, hex(n)))
2357 2354
2358 2355 dh = 0
2359 2356 if oldheads:
2360 2357 heads = cl.heads()
2361 2358 dh = len(heads) - len(oldheads)
2362 2359 for h in heads:
2363 2360 if h not in oldheads and self[h].closesbranch():
2364 2361 dh -= 1
2365 2362 htext = ""
2366 2363 if dh:
2367 2364 htext = _(" (%+d heads)") % dh
2368 2365
2369 2366 self.ui.status(_("added %d changesets"
2370 2367 " with %d changes to %d files%s\n")
2371 2368 % (changesets, revisions, files, htext))
2372 2369
2373 2370 if changesets > 0:
2374 2371 p = lambda: cl.writepending() and self.root or ""
2375 2372 self.hook('pretxnchangegroup', throw=True,
2376 2373 node=hex(cl.node(clstart)), source=srctype,
2377 2374 url=url, pending=p)
2378 2375
2379 2376 added = [cl.node(r) for r in xrange(clstart, clend)]
2380 2377 publishing = self.ui.configbool('phases', 'publish', True)
2381 2378 if srctype == 'push':
2382 2379 # Old servers can not push the boundary themselves.
2383 2380 # New servers won't push the boundary if the changeset already
2384 2381 # existed locally as secret
2385 2382 #
2386 2383 # We should not use added here but the list of all changes in
2387 2384 # the bundle
2388 2385 if publishing:
2389 2386 phases.advanceboundary(self, phases.public, srccontent)
2390 2387 else:
2391 2388 phases.advanceboundary(self, phases.draft, srccontent)
2392 2389 phases.retractboundary(self, phases.draft, added)
2393 2390 elif srctype != 'strip':
2394 2391 # publishing only alters behavior during push
2395 2392 #
2396 2393 # strip should not touch boundary at all
2397 2394 phases.retractboundary(self, phases.draft, added)
2398 2395
2399 2396 # make changelog see real files again
2400 2397 cl.finalize(trp)
2401 2398
2402 2399 tr.close()
2403 2400
2404 2401 if changesets > 0:
2405 2402 def runhooks():
2406 2403 # forcefully update the on-disk branch cache
2407 2404 self.ui.debug("updating the branch cache\n")
2408 2405 self.updatebranchcache()
2409 2406 self.hook("changegroup", node=hex(cl.node(clstart)),
2410 2407 source=srctype, url=url)
2411 2408
2412 2409 for n in added:
2413 2410 self.hook("incoming", node=hex(n), source=srctype,
2414 2411 url=url)
2415 2412 self._afterlock(runhooks)
2416 2413
2417 2414 finally:
2418 2415 tr.release()
2419 2416 # never return 0 here:
2420 2417 if dh < 0:
2421 2418 return dh - 1
2422 2419 else:
2423 2420 return dh + 1
2424 2421
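The closing lines above reserve 0 for "nothing changed or no source", so the head delta dh is pushed away from zero: non-negative deltas return as dh+1, negative ones as dh-1. Round-tripping that convention, as a sketch:

def encodeheaddelta(dh):
    return dh - 1 if dh < 0 else dh + 1

def decodeheaddelta(ret):
    return ret + 1 if ret < 0 else ret - 1

for dh in (-3, -1, 0, 1, 4):
    assert decodeheaddelta(encodeheaddelta(dh)) == dh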
2425 2422 def stream_in(self, remote, requirements):
2426 2423 lock = self.lock()
2427 2424 try:
2428 2425 fp = remote.stream_out()
2429 2426 l = fp.readline()
2430 2427 try:
2431 2428 resp = int(l)
2432 2429 except ValueError:
2433 2430 raise error.ResponseError(
2434 2431 _('unexpected response from remote server:'), l)
2435 2432 if resp == 1:
2436 2433 raise util.Abort(_('operation forbidden by server'))
2437 2434 elif resp == 2:
2438 2435 raise util.Abort(_('locking the remote repository failed'))
2439 2436 elif resp != 0:
2440 2437 raise util.Abort(_('the server sent an unknown error code'))
2441 2438 self.ui.status(_('streaming all changes\n'))
2442 2439 l = fp.readline()
2443 2440 try:
2444 2441 total_files, total_bytes = map(int, l.split(' ', 1))
2445 2442 except (ValueError, TypeError):
2446 2443 raise error.ResponseError(
2447 2444 _('unexpected response from remote server:'), l)
2448 2445 self.ui.status(_('%d files to transfer, %s of data\n') %
2449 2446 (total_files, util.bytecount(total_bytes)))
2450 2447 handled_bytes = 0
2451 2448 self.ui.progress(_('clone'), 0, total=total_bytes)
2452 2449 start = time.time()
2453 2450 for i in xrange(total_files):
2454 2451 # XXX doesn't support '\n' or '\r' in filenames
2455 2452 l = fp.readline()
2456 2453 try:
2457 2454 name, size = l.split('\0', 1)
2458 2455 size = int(size)
2459 2456 except (ValueError, TypeError):
2460 2457 raise error.ResponseError(
2461 2458 _('unexpected response from remote server:'), l)
2462 2459 if self.ui.debugflag:
2463 2460 self.ui.debug('adding %s (%s)\n' %
2464 2461 (name, util.bytecount(size)))
2465 2462 # for backwards compat, name was partially encoded
2466 2463 ofp = self.sopener(store.decodedir(name), 'w')
2467 2464 for chunk in util.filechunkiter(fp, limit=size):
2468 2465 handled_bytes += len(chunk)
2469 2466 self.ui.progress(_('clone'), handled_bytes,
2470 2467 total=total_bytes)
2471 2468 ofp.write(chunk)
2472 2469 ofp.close()
2473 2470 elapsed = time.time() - start
2474 2471 if elapsed <= 0:
2475 2472 elapsed = 0.001
2476 2473 self.ui.progress(_('clone'), None)
2477 2474 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2478 2475 (util.bytecount(total_bytes), elapsed,
2479 2476 util.bytecount(total_bytes / elapsed)))
2480 2477
2481 2478 # new requirements = old non-format requirements +
2482 2479 # new format-related
2483 2480 # requirements from the streamed-in repository
2484 2481 requirements.update(set(self.requirements) - self.supportedformats)
2485 2482 self._applyrequirements(requirements)
2486 2483 self._writerequirements()
2487 2484
2488 2485 self.invalidate()
2489 2486 return len(self.heads()) + 1
2490 2487 finally:
2491 2488 lock.release()
2492 2489
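The stream format consumed above is line-oriented: a status code line, a "total_files total_bytes" line, then for each file a "name\0size" header followed by exactly size raw bytes. A minimal sketch of the per-file framing (text I/O for brevity; the real stream is raw bytes and is copied in chunks):

import io

def readstreamfiles(fp, total_files):
    for _i in range(total_files):
        name, size = fp.readline().split('\0', 1)
        yield name, fp.read(int(size))

fp = io.StringIO(u'data/foo.i\x003\nABC')
assert list(readstreamfiles(fp, 1)) == [(u'data/foo.i', u'ABC')]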
2493 2490 def clone(self, remote, heads=[], stream=False):
2494 2491 '''clone remote repository.
2495 2492
2496 2493 keyword arguments:
2497 2494 heads: list of revs to clone (forces use of pull)
2498 2495 stream: use streaming clone if possible'''
2499 2496
2500 2497 # now, all clients that can request uncompressed clones can
2501 2498 # read repo formats supported by all servers that can serve
2502 2499 # them.
2503 2500
2504 2501 # if revlog format changes, client will have to check version
2505 2502 # and format flags on "stream" capability, and use
2506 2503 # uncompressed only if compatible.
2507 2504
2508 2505 if not stream:
2509 2506 # if the server explicitly prefers to stream (for fast LANs)
2510 2507 stream = remote.capable('stream-preferred')
2511 2508
2512 2509 if stream and not heads:
2513 2510 # 'stream' means remote revlog format is revlogv1 only
2514 2511 if remote.capable('stream'):
2515 2512 return self.stream_in(remote, set(('revlogv1',)))
2516 2513 # otherwise, 'streamreqs' contains the remote revlog format
2517 2514 streamreqs = remote.capable('streamreqs')
2518 2515 if streamreqs:
2519 2516 streamreqs = set(streamreqs.split(','))
2520 2517 # if we support it, stream in and adjust our requirements
2521 2518 if not streamreqs - self.supportedformats:
2522 2519 return self.stream_in(remote, streamreqs)
2523 2520 return self.pull(remote, heads)
2524 2521
2525 2522 def pushkey(self, namespace, key, old, new):
2526 2523 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2527 2524 old=old, new=new)
2528 2525 ret = pushkey.push(self, namespace, key, old, new)
2529 2526 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2530 2527 ret=ret)
2531 2528 return ret
2532 2529
2533 2530 def listkeys(self, namespace):
2534 2531 self.hook('prelistkeys', throw=True, namespace=namespace)
2535 2532 values = pushkey.list(self, namespace)
2536 2533 self.hook('listkeys', namespace=namespace, values=values)
2537 2534 return values
2538 2535
2539 2536 def debugwireargs(self, one, two, three=None, four=None, five=None):
2540 2537 '''used to test argument passing over the wire'''
2541 2538 return "%s %s %s %s %s" % (one, two, three, four, five)
2542 2539
2543 2540 def savecommitmessage(self, text):
2544 2541 fp = self.opener('last-message.txt', 'wb')
2545 2542 try:
2546 2543 fp.write(text)
2547 2544 finally:
2548 2545 fp.close()
2549 2546 return self.pathto(fp.name[len(self.root)+1:])
2550 2547
2551 2548 # used to avoid circular references so destructors work
2552 2549 def aftertrans(files):
2553 2550 renamefiles = [tuple(t) for t in files]
2554 2551 def a():
2555 2552 for src, dest in renamefiles:
2556 2553 try:
2557 2554 util.rename(src, dest)
2558 2555 except OSError: # journal file does not yet exist
2559 2556 pass
2560 2557 return a
2561 2558
2562 2559 def undoname(fn):
2563 2560 base, name = os.path.split(fn)
2564 2561 assert name.startswith('journal')
2565 2562 return os.path.join(base, name.replace('journal', 'undo', 1))
2566 2563
2567 2564 def instance(ui, path, create):
2568 2565 return localrepository(ui, util.urllocalpath(path), create)
2569 2566
2570 2567 def islocal(path):
2571 2568 return True
@@ -1,52 +1,49
1 1 # peer.py - repository base classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from i18n import _
10 10 import error
11 11
12 12 class peerrepository(object):
13 13
14 14 def capable(self, name):
15 15 '''tell whether repo supports named capability.
16 16 return False if not supported.
17 17 if boolean capability, return True.
18 18 if string capability, return string.'''
19 19 caps = self._capabilities()
20 20 if name in caps:
21 21 return True
22 22 name_eq = name + '='
23 23 for cap in caps:
24 24 if cap.startswith(name_eq):
25 25 return cap[len(name_eq):]
26 26 return False
27 27
28 28 def requirecap(self, name, purpose):
29 29 '''raise an exception if the given capability is not present'''
30 30 if not self.capable(name):
31 31 raise error.CapabilityError(
32 32 _('cannot %s; remote repository does not '
33 33 'support the %r capability') % (purpose, name))
34 34
35 35 def local(self):
36 36 '''return peer as a localrepo, or None'''
37 37 return None
38 38
39 39 def peer(self):
40 40 return self
41 41
42 42 def peer(self):
43 43 return self
44 44
45 def cancopy(self):
46 return False
47
48 45 def canpush(self):
49 46 return True
50 47
51 48 def close(self):
52 49 pass