##// END OF EJS Templates
localrepo: use the path relative to "self.vfs" instead of "path" argument...
FUJIWARA Katsunori -
r17159:36a30168 default
parent child Browse files
Show More
@@ -1,586 +1,588
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from i18n import _
10 10 from lock import release
11 11 from node import hex, nullid
12 12 import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo, bookmarks
13 13 import lock, util, extensions, error, node, scmutil
14 14 import cmdutil, discovery
15 15 import merge as mergemod
16 16 import verify as verifymod
17 17 import errno, os, shutil
18 18
def _local(path):
    """Return the repo module for a local path: bundlerepo for a
    bundle file on disk, localrepo otherwise."""
    path = util.expandpath(util.urllocalpath(path))
    if os.path.isfile(path):
        return bundlerepo
    return localrepo
22 22
def addbranchrevs(lrepo, repo, branches, revs):
    """Expand branch names into revision identifiers.

    lrepo: local repo used to resolve the '.' (dirstate) branch
    repo: peer queried for its branchmap (must be 'branchmap'-capable
          for named-branch lookup)
    branches: (hashbranch, branchlist) pair as returned by parseurl()
    revs: caller-supplied revision list, or None

    Returns (revs, checkout) where checkout is the first entry.
    """
    hashbranch, branches = branches
    # nothing branch-related requested: pass revs through untouched
    if not hashbranch and not branches:
        return revs or None, revs and revs[0] or None
    revs = revs and list(revs) or []
    if not repo.capable('branchmap'):
        if branches:
            raise util.Abort(_("remote branch lookup not supported"))
        # hashbranch may be a plain revision; let the caller resolve it
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = repo.branchmap()

    def primary(branch):
        # append all heads of 'branch' (tip-most first) to revs;
        # returns False when the branch is unknown
        if branch == '.':
            if not lrepo or not lrepo.local():
                raise util.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        # hashbranch falls back to a raw revision when not a branch name
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
53 53
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = util.url(path)
    if u.fragment:
        # the fragment names a branch; strip it from the url proper
        branch = u.fragment
        u.fragment = None
    else:
        branch = None
    return str(u), (branch, branches or [])
63 63
# map a url scheme to the module (or callable, for 'file') that provides
# the matching repository class; consulted by _peerlookup below
schemes = {
    'bundle': bundlerepo,
    'file': _local,
    'http': httprepo,
    'https': httprepo,
    'ssh': sshrepo,
    'static-http': statichttprepo,
}
72 72
def _peerlookup(path):
    """Return the repo module handling path's scheme (default 'file')."""
    scheme = util.url(path).scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        # 'file' maps to a callable that picks bundlerepo vs localrepo
        return thing(path)
    except TypeError:
        # plain module entries are not callable; return them as-is
        return thing
81 81
def islocal(repo):
    '''return true if repo or path is local'''
    if not isinstance(repo, str):
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # scheme module has no islocal() -> treat as remote
        return False
90 90
def repository(ui, path='', create=False):
    """return a repository object for the specified path"""
    repoobj = _peerlookup(path).instance(ui, path, create)
    # prefer the repo's own ui (carries its config) when it has one
    ui = getattr(repoobj, "ui", ui)
    for _name, module in extensions.extensions():
        reposetup = getattr(module, 'reposetup', None)
        if reposetup:
            reposetup(ui, repoobj)
    return repoobj
100 100
def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    # build a ui carrying remote-relevant options, then open normally
    return repository(remoteui(uiorrepo, opts), path, create)
105 105
def defaultdest(source):
    '''return default destination of clone if none is given'''
    # normalize first so a trailing slash does not yield an empty basename
    normalized = os.path.normpath(source)
    return os.path.basename(normalized)
109 109
def share(ui, source, dest=None, update=True):
    '''create a shared repository

    source: local repo object or path/URL of the repo to share from
    dest: destination directory (defaults to basename of source)
    update: update the new working directory after sharing; a value
            other than True is used as the revision to check out
    '''
    if not islocal(source):
        raise util.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    root = os.path.realpath(dest)
    roothg = os.path.join(root, '.hg')

    if os.path.exists(roothg):
        raise util.Abort(_('destination already exists'))

    if not os.path.isdir(root):
        os.mkdir(root)
    util.makedir(roothg, notindexed=True)

    # copy the source's requirements, adding 'shared' so readers know to
    # follow .hg/sharedpath; a missing requires file is tolerated
    requirements = ''
    try:
        requirements = srcrepo.opener.read('requires')
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise

    requirements += 'shared\n'
    util.writefile(os.path.join(roothg, 'requires'), requirements)
    util.writefile(os.path.join(roothg, 'sharedpath'), sharedpath)

    r = repository(ui, root)

    # propagate the source's default path so pulls keep working
    default = srcrepo.ui.config('paths', 'default')
    if default:
        fp = r.opener("hgrc", "w", text=True)
        fp.write("[paths]\n")
        fp.write("default = %s\n" % default)
        fp.close()

    if update:
        r.ui.status(_("updating working directory\n"))
        if update is not True:
            checkout = update
        # first resolvable of: requested rev, 'default' branch, tip
        for test in (checkout, 'default', 'tip'):
            if test is None:
                continue
            try:
                uprev = r.lookup(test)
                break
            except error.RepoLookupError:
                continue
        _update(r, uprev)
176 176
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock (a held lock on the destination store, or None);
    the caller is responsible for releasing it
    '''
    destlock = None
    try:
        # hardlink starts as None: util.copyfiles probes whether
        # hardlinking works and reports back via its return value
        hardlink = None
        num = 0
        srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
        for f in srcrepo.store.copylist():
            # a publishing repo's phaseroots would mark everything
            # public anyway, so skip copying it
            if srcpublishing and f.endswith('phaseroots'):
                continue
            src = os.path.join(srcrepo.sharedpath, f)
            dst = os.path.join(destpath, f)
            dstbase = os.path.dirname(dst)
            if dstbase and not os.path.exists(dstbase):
                os.mkdir(dstbase)
            if os.path.exists(src):
                if dst.endswith('data'):
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(os.path.join(dstbase, "lock"))
                hardlink, n = util.copyfiles(src, dst, hardlink)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
        else:
            ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        # deliberately bare: release the lock on ANY failure, then re-raise
        release(destlock)
        raise
209 209
def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
          update=True, stream=False, branch=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository objects, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone
    """

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        srcrepo = repository(remoteui(ui, peeropts), source)
    else:
        srcrepo = source
        branch = (None, branch or [])
        origsource = source = srcrepo.url()
    rev, checkout = addbranchrevs(srcrepo, srcrepo, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise util.Abort(_("empty destination path is not valid"))
    if os.path.exists(dest):
        if not os.path.isdir(dest):
            raise util.Abort(_("destination '%s' already exists") % dest)
        elif os.listdir(dest):
            raise util.Abort(_("destination '%s' is not empty") % dest)

    # removes the partially-created destination on failure; close()
    # disarms it once the clone is known to have succeeded
    class DirCleanup(object):
        def __init__(self, dir_):
            self.rmtree = shutil.rmtree
            self.dir_ = dir_
        def close(self):
            self.dir_ = None
        def cleanup(self):
            if self.dir_:
                self.rmtree(self.dir_, True)

    srclock = destlock = dircleanup = None
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            dircleanup = DirCleanup(dest)

        # a local-to-local clone without pull/rev can copy store files
        # directly instead of exchanging changegroups (but never when
        # secret changesets would leak)
        copy = False
        if srcrepo.cancopy() and islocal(dest) and not srcrepo.revs("secret()"):
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                # source is busy: fall back to a pull-style clone
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                dircleanup.dir_ = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    dircleanup.close()
                    raise util.Abort(_("destination '%s' already exists")
                                     % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)

            # we need to re-init the repo after manually copying the data
            # into it
            destrepo = repository(remoteui(ui, peeropts), dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destrepo = repository(remoteui(ui, peeropts), dest,
                                      create=True)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    dircleanup.close()
                    raise util.Abort(_("destination '%s' already exists")
                                     % dest)
                raise

        revs = None
        if rev:
            if not srcrepo.capable('lookup'):
                raise util.Abort(_("src repository does not support "
                                   "revision lookup and so doesn't "
                                   "support clone by revision"))
            revs = [srcrepo.lookup(r) for r in rev]
            checkout = revs[0]
        if destrepo.local():
            destrepo.clone(srcrepo, heads=revs, stream=stream)
        elif srcrepo.local():
            srcrepo.push(destrepo, revs=revs)
        else:
            raise util.Abort(_("clone from remote to remote not supported"))

        # clone succeeded: keep the destination directory
        if dircleanup:
            dircleanup.close()

        # clone all bookmarks except divergent ones
        if destrepo.local() and srcrepo.capable("pushkey"):
            rb = srcrepo.listkeys('bookmarks')
            for k, n in rb.iteritems():
                try:
                    m = destrepo.lookup(n)
                    destrepo._bookmarks[k] = m
                except error.RepoLookupError:
                    # bookmark points at an uncloned changeset: skip it
                    pass
            if rb:
                bookmarks.write(destrepo)
        elif srcrepo.local() and destrepo.capable("pushkey"):
            for k, n in srcrepo._bookmarks.iteritems():
                destrepo.pushkey('bookmarks', k, '', hex(n))

        if destrepo.local():
            # record the source (password stripped) as the default path
            fp = destrepo.opener("hgrc", "w", text=True)
            fp.write("[paths]\n")
            u = util.url(abspath)
            u.passwd = None
            defaulturl = str(u)
            fp.write("default = %s\n" % defaulturl)
            fp.close()

            destrepo.ui.setconfig('paths', 'default', defaulturl)

            if update:
                if update is not True:
                    checkout = update
                    if srcrepo.local():
                        checkout = srcrepo.lookup(update)
                # first resolvable of: requested rev, 'default', tip
                for test in (checkout, 'default', 'tip'):
                    if test is None:
                        continue
                    try:
                        uprev = destrepo.lookup(test)
                        break
                    except error.RepoLookupError:
                        continue
                bn = destrepo[uprev].branch()
                destrepo.ui.status(_("updating to branch %s\n") % bn)
                _update(destrepo, uprev)

        return srcrepo, destrepo
    finally:
        release(srclock, destlock)
        if dircleanup is not None:
            dircleanup.cleanup()
        if srcrepo is not None:
            srcrepo.close()
409 411
def _showstats(repo, stats):
    """Print the (updated, merged, removed, unresolved) merge counts."""
    template = _("%d files updated, %d files merged, "
                 "%d files removed, %d files unresolved\n")
    repo.ui.status(template % stats)
413 415
def update(repo, node):
    """update the working directory to node, merging linear changes"""
    stats = mergemod.update(repo, node, False, False, None)
    _showstats(repo, stats)
    # stats[3] is the unresolved-files count (see _showstats format)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats[3] > 0

# naming conflict in clone(): its 'update' parameter shadows this
# function, so clone() calls it through the _update alias
_update = update
424 426
def clean(repo, node, show_stats=True):
    """forcibly switch the working directory to node, clobbering changes"""
    mergestats = mergemod.update(repo, node, False, True, None)
    if show_stats:
        _showstats(repo, mergestats)
    # non-zero unresolved count means failure
    return mergestats[3] > 0
431 433
def merge(repo, node, force=None, remind=True):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    mergestats = mergemod.update(repo, node, True, force, False)
    _showstats(repo, mergestats)
    unresolved = mergestats[3]
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C .' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return unresolved > 0
443 445
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the value of
    subreporecurse().
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # may substitute a bundle-backed repo for 'other'; cleanupfn must
    # run regardless of how display goes
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()

        displayer = cmdutil.show_changeset(ui, other, opts, buffered)

        # XXX once graphlog extension makes it into core,
        # should be replaced by a if graph/else
        displaychlist(other, chlist, displayer)

        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
477 479
def incoming(ui, repo, source, opts):
    """Show changesets in source not present in repo; returns an exit code."""
    def subreporecurse():
        # recurse into subrepos when --subrepos was given; 0 if any
        # subrepo reported incoming changes
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # show up to --limit changesets, honoring --newest-first and
        # --no-merges
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)
502 504
def _outgoing(ui, repo, dest, opts):
    """Return the list of nodes missing at dest, or None when there are
    no outgoing changes (after printing a 'no changes found' note)."""
    # prefer the 'default-push' path, falling back to 'default'
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = parseurl(dest, opts.get('branch'))
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, outgoing.excluded)
        return None
    return o
519 521
def outgoing(ui, repo, dest, opts):
    """Show changesets in repo not present in dest; returns an exit code."""
    def recurse():
        # recurse into subrepos when --subrepos was given; 0 if any
        # subrepo reported outgoing changes
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o = _outgoing(ui, repo, dest, opts)
    if o is None:
        return recurse()

    # show up to --limit changesets, honoring --newest-first/--no-merges
    if opts.get('newest_first'):
        o.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    recurse()
    return 0 # exit code is zero since we found outgoing changes
550 552
def revert(repo, node, choose):
    """revert changes to revision in node without updating dirstate"""
    mergestats = mergemod.update(repo, node, False, True, choose)
    # index 3 is the unresolved-files count
    return mergestats[3] > 0
554 556
def verify(repo):
    """verify the consistency of a repository

    Delegates to the verify module; returns its result.
    """
    return verifymod.verify(repo)
558 560
def remoteui(src, opts):
    '''build a remote ui from ui or repo and opts

    src: a repository (its baseui is copied, dropping repo-local
    config) or a plain ui object (copied wholesale).  Selected local
    settings relevant to talking to a remote (ssh, bundle, auth,
    proxy, certificates) are carried over to the copy.
    '''
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options; command-line opts win over config
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v)

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r)

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val)
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v))

    return dst
@@ -1,2457 +1,2457
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import repo, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve fname relative to the store (sjoin) rather than .hg
        return obj.sjoin(fname)
25 25
26 26 class localrepository(repo.repository):
27 27 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
28 28 'known', 'getbundle'))
29 29 supportedformats = set(('revlogv1', 'generaldelta'))
30 30 supported = supportedformats | set(('store', 'fncache', 'shared',
31 31 'dotencode'))
32 32 openerreqs = set(('revlogv1', 'generaldelta'))
33 33 requirements = ['revlogv1']
34 34
35 35 def _baserequirements(self, create):
36 36 return self.requirements[:]
37 37
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at path.

        baseui: ui object copied into self.ui and kept as self.baseui
        path: working directory root (url/`~` expanded by the opener)
        create: create a new repository; raises RepoError if one
        already exists, or if none exists and create is false.
        """
        repo.repository.__init__(self)
        self.wopener = scmutil.opener(path, expand=True)
        self.wvfs = self.wopener
        # use the opener's expanded base, not the raw 'path' argument,
        # for all filesystem work below
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.vfs = self.opener
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc is fine
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(self.root):
                    util.makedirs(self.root)
                util.makedir(self.path, notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # old-style repo without a requires file
                requirements = set()

        # follow .hg/sharedpath to the real store location, if sharing
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.svfs = self.sopener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        # branch-head cache state, filled lazily
        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
128 128
129 129 def _applyrequirements(self, requirements):
130 130 self.requirements = requirements
131 131 self.sopener.options = dict((r, 1) for r in requirements
132 132 if r in self.openerreqs)
133 133
134 134 def _writerequirements(self):
135 135 reqfile = self.opener("requires", "w")
136 136 for r in self.requirements:
137 137 reqfile.write("%s\n" % r)
138 138 reqfile.close()
139 139
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        path must be an absolute path below self.root; returns True
        only when some prefix of the relative subpath is registered as
        a subrepo in the working copy (checked recursively through the
        subrepo itself).
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            # try successively shorter prefixes of the path
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
177 177
    @filecache('bookmarks')
    def _bookmarks(self):
        # bookmark data, re-read when .hg/bookmarks changes on disk
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # active bookmark, re-read when .hg/bookmarks.current changes
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        # note: 'marks' is unused here; bookmarks.write reads the repo state
        bookmarks.write(self)
188 188
189 189 def bookmarkheads(self, bookmark):
190 190 name = bookmark.split('@', 1)[0]
191 191 heads = []
192 192 for mark, n in self._bookmarks.iteritems():
193 193 if mark.split('@', 1)[0] == name:
194 194 heads.append(n)
195 195 return heads
196 196
    @storecache('phaseroots')
    def _phasecache(self):
        # phase data, re-read when the store's 'phaseroots' file changes
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # obsolescence marker store, backed by the store opener
        store = obsolete.obsstore(self.sopener)
        return store
205 205
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        # pick up not-yet-committed changelog data written by a pending
        # transaction, but only when HG_PENDING points inside this repo
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
214 214
    @storecache('00manifest.i')
    def manifest(self):
        # manifest revlog, re-read when the store's 00manifest.i changes
        return manifest.manifest(self.sopener)
218 218
    @filecache('dirstate')
    def dirstate(self):
        # warned is a one-element list so the closure can mutate it
        warned = [0]
        def validate(node):
            # map an unknown working-directory parent to nullid,
            # warning only once
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
234 234
235 235 def __getitem__(self, changeid):
236 236 if changeid is None:
237 237 return context.workingctx(self)
238 238 return context.changectx(self, changeid)
239 239
240 240 def __contains__(self, changeid):
241 241 try:
242 242 return bool(self.lookup(changeid))
243 243 except error.RepoLookupError:
244 244 return False
245 245
    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        # number of revisions, taken from the changelog
        return len(self.changelog)

    def __iter__(self):
        # iterate all revision numbers, oldest first
        for i in xrange(len(self)):
            yield i
255 255
256 256 def revs(self, expr, *args):
257 257 '''Return a list of revisions matching the given revset'''
258 258 expr = revset.formatspec(expr, *args)
259 259 m = revset.match(None, expr)
260 260 return [r for r in m(self, range(len(self)))]
261 261
262 262 def set(self, expr, *args):
263 263 '''
264 264 Yield a context for each matching revision, after doing arg
265 265 replacement via revset.formatspec
266 266 '''
267 267 for r in self.revs(expr, *args):
268 268 yield self[r]
269 269
    def url(self):
        """Return this repository's URL ('file:' + root path)."""
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Run the named hook with kwargs; delegates to hook.hook."""
        return hook.hook(self.ui, self, name, throw, **args)
275 275
276 276 tag_disallowed = ':\r\n'
277 277
278 278 def _tag(self, names, node, message, local, user, date, extra={}):
279 279 if isinstance(names, str):
280 280 allchars = names
281 281 names = (names,)
282 282 else:
283 283 allchars = ''.join(names)
284 284 for c in self.tag_disallowed:
285 285 if c in allchars:
286 286 raise util.Abort(_('%r cannot be used in a tag name') % c)
287 287
288 288 branches = self.branchmap()
289 289 for name in names:
290 290 self.hook('pretag', throw=True, node=hex(node), tag=name,
291 291 local=local)
292 292 if name in branches:
293 293 self.ui.warn(_("warning: tag %s conflicts with existing"
294 294 " branch name\n") % name)
295 295
296 296 def writetags(fp, names, munge, prevtags):
297 297 fp.seek(0, 2)
298 298 if prevtags and prevtags[-1] != '\n':
299 299 fp.write('\n')
300 300 for name in names:
301 301 m = munge and munge(name) or name
302 302 if (self._tagscache.tagtypes and
303 303 name in self._tagscache.tagtypes):
304 304 old = self.tags().get(name, nullid)
305 305 fp.write('%s %s\n' % (hex(old), m))
306 306 fp.write('%s %s\n' % (hex(node), m))
307 307 fp.close()
308 308
309 309 prevtags = ''
310 310 if local:
311 311 try:
312 312 fp = self.opener('localtags', 'r+')
313 313 except IOError:
314 314 fp = self.opener('localtags', 'a')
315 315 else:
316 316 prevtags = fp.read()
317 317
318 318 # local tags are stored in the current charset
319 319 writetags(fp, names, None, prevtags)
320 320 for name in names:
321 321 self.hook('tag', node=hex(node), tag=name, local=local)
322 322 return
323 323
324 324 try:
325 325 fp = self.wfile('.hgtags', 'rb+')
326 326 except IOError, e:
327 327 if e.errno != errno.ENOENT:
328 328 raise
329 329 fp = self.wfile('.hgtags', 'ab')
330 330 else:
331 331 prevtags = fp.read()
332 332
333 333 # committed tags are stored in UTF-8
334 334 writetags(fp, names, encoding.fromlocal, prevtags)
335 335
336 336 fp.close()
337 337
338 338 self.invalidatecaches()
339 339
340 340 if '.hgtags' not in self.dirstate:
341 341 self[None].add(['.hgtags'])
342 342
343 343 m = matchmod.exact(self.root, '', ['.hgtags'])
344 344 tagnode = self.commit(message, user, date, extra=extra, match=m)
345 345
346 346 for name in names:
347 347 self.hook('tag', node=hex(node), tag=name, local=local)
348 348
349 349 return tagnode
350 350
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to tag if .hgtags itself is dirty; status()[:5] is
            # (modified, added, removed, deleted, unknown)
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
380 380
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # lazily-populated derived views; filled in by tagslist()
                # and nodetags() on first use
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
403 403
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                # LookupError: node missing from changelog;
                # ValueError: malformed node -- skip either way
                pass
        return t
415 415
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        # global tags first, then local ones so local tags override
        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # 'tip' is always implicitly defined
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
448 448
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
459 459
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        # computed lazily and memoized on the tags cache object
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist
470 470
    def nodetags(self, node):
        '''return the tags associated with a node'''
        # build the reverse (node -> tag names) mapping once, memoized
        # on the tags cache object
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
481 481
482 482 def nodebookmarks(self, node):
483 483 marks = []
484 484 for bookmark, n in self._bookmarks.iteritems():
485 485 if n == node:
486 486 marks.append(bookmark)
487 487 return sorted(marks)
488 488
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Bring the partial branchhead cache (valid up to lrev) up to
        # tip, and persist the refreshed cache to disk.
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
498 498
    def updatebranchcache(self):
        '''refresh the in-memory branchhead cache (self._branchcache)'''
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache already current for this tip
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory state (e.g. after a strip) -- start
            # from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update from the previously cached tip
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial
515 515
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        # make sure the cache is current before handing it out
        self.updatebranchcache()
        return self._branchcache
520 520
521 521 def _branchtip(self, heads):
522 522 '''return the tipmost branch head in heads'''
523 523 tip = heads[-1]
524 524 for h in reversed(heads):
525 525 if not self[h].closesbranch():
526 526 tip = h
527 527 break
528 528 return tip
529 529
530 530 def branchtip(self, branch):
531 531 '''return the tip node for a given branch'''
532 532 if branch not in self.branchmap():
533 533 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
534 534 return self._branchtip(self.branchmap()[branch])
535 535
536 536 def branchtags(self):
537 537 '''return a dict where branch names map to the tipmost head of
538 538 the branch, open heads come before closed'''
539 539 bt = {}
540 540 for bn, heads in self.branchmap().iteritems():
541 541 bt[bn] = self._branchtip(heads)
542 542 return bt
543 543
    def _readbranchcache(self):
        '''read the on-disk branchhead cache.

        Returns (partial, last, lrev) where partial maps branch name to
        a list of head nodes, and last/lrev identify the tip the cache
        was valid for.  Returns an empty cache on any read or
        validation failure.
        '''
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line is "<tipnode> <tiprev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines are "<headnode> <branchname>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '+
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # any parse/validation problem just means "no cache"
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
575 575
    def _writebranchcache(self, branches, tip, tiprev):
        '''persist the branchhead cache; failures are silently ignored
        since the cache can always be rebuilt'''
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            # header line: tip node and revision the cache is valid for
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass
586 586
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial:
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]
646 646
    def lookup(self, key):
        '''resolve any changeid (rev number, node, tag, ...) to a
        binary changelog node'''
        return self[key].node()
649 649
    def lookupbranch(self, key, remote=None):
        '''interpret key as a branch name (preferred) or as a changeid
        whose branch is returned; consults remote's branchmap if given'''
        repo = remote or self
        if key in repo.branchmap():
            return key

        # fall back to resolving key as a revision; only possible
        # locally (or on a local "remote")
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
657 657
658 658 def known(self, nodes):
659 659 nm = self.changelog.nodemap
660 660 pc = self._phasecache
661 661 result = []
662 662 for n in nodes:
663 663 r = nm.get(n)
664 664 resp = not (r is None or pc.phase(self, r) >= phases.secret)
665 665 result.append(resp)
666 666 return result
667 667
    def local(self):
        # a local repository object acts as its own "local" peer
        return self
670 670
    def join(self, f):
        '''return path of f relative to the .hg directory (self.path)'''
        return os.path.join(self.path, f)
673 673
    def wjoin(self, f):
        '''return path of f relative to the working directory root'''
        return os.path.join(self.root, f)
676 676
677 677 def file(self, f):
678 678 if f[0] == '/':
679 679 f = f[1:]
680 680 return filelog.filelog(self.sopener, f)
681 681
    def changectx(self, changeid):
        '''return the changectx for changeid (alias for repo[changeid])'''
        return self[changeid]
684 684
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid=None means the working directory context
        return self[changeid].parents()
688 688
    def setparents(self, p1, p2=nullid):
        '''set the working directory parents to p1 (and optionally p2)'''
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
699 699
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
704 704
    def getcwd(self):
        '''return current working directory, as seen by the dirstate'''
        return self.dirstate.getcwd()
707 707
    def pathto(self, f, cwd=None):
        '''return path of repo-relative f, relative to cwd'''
        return self.dirstate.pathto(f, cwd)
710 710
    def wfile(self, f, mode='r'):
        '''open file f in the working directory'''
        return self.wopener(f, mode)
713 713
    def _link(self, f):
        '''True if working-directory file f is a symlink'''
        return os.path.islink(self.wjoin(f))
716 716
    def _loadfilter(self, filter):
        '''load and cache the (matcher, filterfn, params) triples for
        the given config section ('encode' or 'decode')'''
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a registered data filter whose name prefixes cmd
                # takes precedence over running a shell command
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
740 740
741 741 def _filter(self, filterpats, filename, data):
742 742 for mf, fn, cmd in filterpats:
743 743 if mf(filename):
744 744 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
745 745 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
746 746 break
747 747
748 748 return data
749 749
    @propertycache
    def _encodefilterpats(self):
        # filters applied when reading working-directory data (wread)
        return self._loadfilter('encode')
753 753
    @propertycache
    def _decodefilterpats(self):
        # filters applied when writing working-directory data (wwrite)
        return self._loadfilter('decode')
757 757
    def adddatafilter(self, name, filter):
        '''register a named data filter usable in encode/decode config'''
        self._datafilters[name] = filter
760 760
    def wread(self, filename):
        '''read filename from the working directory, applying the
        'encode' filters; symlinks yield their target string'''
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)
767 767
    def wwrite(self, filename, data, flags):
        '''write data to filename in the working directory, applying
        the 'decode' filters; flags may contain 'l' (symlink) and
        'x' (executable)'''
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)
776 776
    def wwritedata(self, filename, data):
        '''return data filtered for writing, without touching the disk'''
        return self._filter(self._decodefilterpats, filename, data)
779 779
    def transaction(self, desc):
        '''open (or nest into) a store transaction described by desc'''
        # reuse a running transaction, if any, via a nested handle
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # weakref so an abandoned transaction can be garbage collected
        self._transref = weakref.ref(tr)
        return tr
799 799
    def _journalfiles(self):
        '''paths of every journal file written for a transaction'''
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))
805 805
    def undofiles(self):
        '''paths of the undo files the journal files are renamed to'''
        return [undoname(x) for x in self._journalfiles()]
808 808
    def _writejournal(self, desc):
        '''snapshot dirstate, branch, bookmarks and phaseroots so a
        rollback can restore them; desc is recorded in journal.desc'''
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        # journal.desc records the pre-transaction length and the
        # operation description, consumed by _rollback()
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
820 820
    def recover(self):
        '''roll back an interrupted transaction, if one exists.

        Returns True when a journal was found and rolled back, False
        otherwise.'''
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
835 835
    def rollback(self, dryrun=False, force=False):
        '''undo the last transaction, if undo information exists.

        Returns the _rollback() result, or 1 when nothing to undo.'''
        wlock = lock = None
        try:
            # both locks: rollback touches store and working dir state
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
848 848
    def _rollback(self, dryrun, force):
        '''implementation of rollback(); caller holds wlock and lock.

        Reads undo.desc to describe what is being undone, refuses to
        roll back a commit while not checked out unless force is set,
        then restores the store, bookmarks, phaseroots and (when the
        parents vanished) the dirstate.  Returns 0.'''
        ui = self.ui
        try:
            # undo.desc format: old repo length, description, optional
            # detail (written by _writejournal)
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # only restore dirstate/branch when the rollback removed the
        # working directory's parents from history
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
914 914
915 915 def invalidatecaches(self):
916 916 def delcache(name):
917 917 try:
918 918 delattr(self, name)
919 919 except AttributeError:
920 920 pass
921 921
922 922 delcache('_tagscache')
923 923
924 924 self._branchcache = None # in UTF-8
925 925 self._branchcachetip = None
926 926
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if 'dirstate' in self.__dict__:
            # also drop the dirstate's own filecache entries before
            # removing the propertycache value itself
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')
943 943
    def invalidate(self):
        '''drop all filecache'd properties so they are reloaded from
        disk on next access'''
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()
958 958
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        '''acquire the lock file at lockname.

        When the lock is held elsewhere: raise immediately if wait is
        false, otherwise warn and retry with the ui-configured timeout.
        acquirefn, if given, runs after the lock is obtained.'''
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
973 973
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            # no lock currently held: run the callback right away
            callback()
983 983
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse a lock we already hold by bumping its refcount
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # flush store/phase state and refresh filecache stamps on
            # release (dirstate is covered by wlock instead)
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1006 1006
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # reuse a lock we already hold by bumping its refcount
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # flush the dirstate and refresh its cache stamp on release
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1027 1027
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context being committed; manifest1/manifest2
        are the parents' manifests.  Appends fname to changelist when a
        new filelog revision or a flags-only change is recorded, and
        returns the resulting filelog node (or the reused parent node).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1107 1107
1108 1108 def commit(self, text="", user=None, date=None, match=None, force=False,
1109 1109 editor=False, extra={}):
1110 1110 """Add a new revision to current repository.
1111 1111
1112 1112 Revision information is gathered from the working directory,
1113 1113 match can be used to filter the committed files. If editor is
1114 1114 supplied, it is called to get a commit message.
1115 1115 """
1116 1116
1117 1117 def fail(f, msg):
1118 1118 raise util.Abort('%s: %s' % (f, msg))
1119 1119
1120 1120 if not match:
1121 1121 match = matchmod.always(self.root, '')
1122 1122
1123 1123 if not force:
1124 1124 vdirs = []
1125 1125 match.dir = vdirs.append
1126 1126 match.bad = fail
1127 1127
1128 1128 wlock = self.wlock()
1129 1129 try:
1130 1130 wctx = self[None]
1131 1131 merge = len(wctx.parents()) > 1
1132 1132
1133 1133 if (not force and merge and match and
1134 1134 (match.files() or match.anypats())):
1135 1135 raise util.Abort(_('cannot partially commit a merge '
1136 1136 '(do not specify files or patterns)'))
1137 1137
1138 1138 changes = self.status(match=match, clean=force)
1139 1139 if force:
1140 1140 changes[0].extend(changes[6]) # mq may commit unchanged files
1141 1141
1142 1142 # check subrepos
1143 1143 subs = []
1144 1144 commitsubs = set()
1145 1145 newstate = wctx.substate.copy()
1146 1146 # only manage subrepos and .hgsubstate if .hgsub is present
1147 1147 if '.hgsub' in wctx:
1148 1148 # we'll decide whether to track this ourselves, thanks
1149 1149 if '.hgsubstate' in changes[0]:
1150 1150 changes[0].remove('.hgsubstate')
1151 1151 if '.hgsubstate' in changes[2]:
1152 1152 changes[2].remove('.hgsubstate')
1153 1153
1154 1154 # compare current state to last committed state
1155 1155 # build new substate based on last committed state
1156 1156 oldstate = wctx.p1().substate
1157 1157 for s in sorted(newstate.keys()):
1158 1158 if not match(s):
1159 1159 # ignore working copy, use old state if present
1160 1160 if s in oldstate:
1161 1161 newstate[s] = oldstate[s]
1162 1162 continue
1163 1163 if not force:
1164 1164 raise util.Abort(
1165 1165 _("commit with new subrepo %s excluded") % s)
1166 1166 if wctx.sub(s).dirty(True):
1167 1167 if not self.ui.configbool('ui', 'commitsubrepos'):
1168 1168 raise util.Abort(
1169 1169 _("uncommitted changes in subrepo %s") % s,
1170 1170 hint=_("use --subrepos for recursive commit"))
1171 1171 subs.append(s)
1172 1172 commitsubs.add(s)
1173 1173 else:
1174 1174 bs = wctx.sub(s).basestate()
1175 1175 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1176 1176 if oldstate.get(s, (None, None, None))[1] != bs:
1177 1177 subs.append(s)
1178 1178
1179 1179 # check for removed subrepos
1180 1180 for p in wctx.parents():
1181 1181 r = [s for s in p.substate if s not in newstate]
1182 1182 subs += [s for s in r if match(s)]
1183 1183 if subs:
1184 1184 if (not match('.hgsub') and
1185 1185 '.hgsub' in (wctx.modified() + wctx.added())):
1186 1186 raise util.Abort(
1187 1187 _("can't commit subrepos without .hgsub"))
1188 1188 changes[0].insert(0, '.hgsubstate')
1189 1189
1190 1190 elif '.hgsub' in changes[2]:
1191 1191 # clean up .hgsubstate when .hgsub is removed
1192 1192 if ('.hgsubstate' in wctx and
1193 1193 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1194 1194 changes[2].insert(0, '.hgsubstate')
1195 1195
1196 1196 # make sure all explicit patterns are matched
1197 1197 if not force and match.files():
1198 1198 matched = set(changes[0] + changes[1] + changes[2])
1199 1199
1200 1200 for f in match.files():
1201 1201 if f == '.' or f in matched or f in wctx.substate:
1202 1202 continue
1203 1203 if f in changes[3]: # missing
1204 1204 fail(f, _('file not found!'))
1205 1205 if f in vdirs: # visited directory
1206 1206 d = f + '/'
1207 1207 for mf in matched:
1208 1208 if mf.startswith(d):
1209 1209 break
1210 1210 else:
1211 1211 fail(f, _("no match under directory!"))
1212 1212 elif f not in self.dirstate:
1213 1213 fail(f, _("file not tracked!"))
1214 1214
1215 1215 if (not force and not extra.get("close") and not merge
1216 1216 and not (changes[0] or changes[1] or changes[2])
1217 1217 and wctx.branch() == wctx.p1().branch()):
1218 1218 return None
1219 1219
1220 1220 if merge and changes[3]:
1221 1221 raise util.Abort(_("cannot commit merge with missing files"))
1222 1222
1223 1223 ms = mergemod.mergestate(self)
1224 1224 for f in changes[0]:
1225 1225 if f in ms and ms[f] == 'u':
1226 1226 raise util.Abort(_("unresolved merge conflicts "
1227 1227 "(see hg help resolve)"))
1228 1228
1229 1229 cctx = context.workingctx(self, text, user, date, extra, changes)
1230 1230 if editor:
1231 1231 cctx._text = editor(self, cctx, subs)
1232 1232 edited = (text != cctx._text)
1233 1233
1234 1234 # commit subs and write new state
1235 1235 if subs:
1236 1236 for s in sorted(commitsubs):
1237 1237 sub = wctx.sub(s)
1238 1238 self.ui.status(_('committing subrepository %s\n') %
1239 1239 subrepo.subrelpath(sub))
1240 1240 sr = sub.commit(cctx._text, user, date)
1241 1241 newstate[s] = (newstate[s][0], sr)
1242 1242 subrepo.writestate(self, newstate)
1243 1243
1244 1244 # Save commit message in case this transaction gets rolled back
1245 1245 # (e.g. by a pretxncommit hook). Leave the content alone on
1246 1246 # the assumption that the user will use the same editor again.
1247 1247 msgfn = self.savecommitmessage(cctx._text)
1248 1248
1249 1249 p1, p2 = self.dirstate.parents()
1250 1250 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1251 1251 try:
1252 1252 self.hook("precommit", throw=True, parent1=hookp1,
1253 1253 parent2=hookp2)
1254 1254 ret = self.commitctx(cctx, True)
1255 1255 except: # re-raises
1256 1256 if edited:
1257 1257 self.ui.write(
1258 1258 _('note: commit message saved in %s\n') % msgfn)
1259 1259 raise
1260 1260
1261 1261 # update bookmarks, dirstate and mergestate
1262 1262 bookmarks.update(self, [p1, p2], ret)
1263 1263 for f in changes[0] + changes[1]:
1264 1264 self.dirstate.normal(f)
1265 1265 for f in changes[2]:
1266 1266 self.dirstate.drop(f)
1267 1267 self.dirstate.setparents(ret)
1268 1268 ms.reset()
1269 1269 finally:
1270 1270 wlock.release()
1271 1271
1272 1272 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1273 1273 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1274 1274 self._afterlock(commithook)
1275 1275 return ret
1276 1276
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        If error is True, an IOError while reading a file to commit is
        fatal; otherwise a file that cannot be read (ENOENT) is treated
        as removed.  Returns the node id of the new changeset.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # weak proxy so the closed-over transaction can be collected
            # once released
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}          # filename -> new filelog node
                changed = []      # files actually changed by _filecommit
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            # unreadable file: record it as removed instead
                            removed.append(f)

                # update manifest
                m1.update(new)
                # only report files removed that actually existed somewhere
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # nothing touched: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # lets hooks see the pending changelog data on disk
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit is proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            # release even on pretxncommit abort so the repo is unlocked
            if tr:
                tr.release()
            lock.release()
1357 1357
1358 1358 def destroyed(self, newheadnodes=None):
1359 1359 '''Inform the repository that nodes have been destroyed.
1360 1360 Intended for use by strip and rollback, so there's a common
1361 1361 place for anything that has to be done after destroying history.
1362 1362
1363 1363 If you know the branchheadcache was uptodate before nodes were removed
1364 1364 and you also know the set of candidate new heads that may have resulted
1365 1365 from the destruction, you can set newheadnodes. This will enable the
1366 1366 code to update the branchheads cache, rather than having future code
1367 1367 decide it's invalid and regenrating it from scratch.
1368 1368 '''
1369 1369 # If we have info, newheadnodes, on how to update the branch cache, do
1370 1370 # it, Otherwise, since nodes were destroyed, the cache is stale and this
1371 1371 # will be caught the next time it is read.
1372 1372 if newheadnodes:
1373 1373 tiprev = len(self) - 1
1374 1374 ctxgen = (self[node] for node in newheadnodes
1375 1375 if self.changelog.hasnode(node))
1376 1376 self._updatebranchcache(self._branchcache, ctxgen)
1377 1377 self._writebranchcache(self._branchcache, self.changelog.tip(),
1378 1378 tiprev)
1379 1379
1380 1380 # Ensure the persistent tag cache is updated. Doing it now
1381 1381 # means that the tag cache only has to worry about destroyed
1382 1382 # heads immediately after a strip/rollback. That in turn
1383 1383 # guarantees that "cachetip == currenttip" (comparing both rev
1384 1384 # and node) always means no nodes have been added or destroyed.
1385 1385
1386 1386 # XXX this is suboptimal when qrefresh'ing: we strip the current
1387 1387 # head, refresh the tag cache, then immediately add a new head.
1388 1388 # But I think doing it this way is necessary for the "instant
1389 1389 # tag cache retrieval" case to work.
1390 1390 self.invalidatecaches()
1391 1391
1392 1392 def walk(self, match, node=None):
1393 1393 '''
1394 1394 walk recursively through the directory tree or a given
1395 1395 changeset, finding all files matched by the match
1396 1396 function
1397 1397 '''
1398 1398 return self[node].walk(match)
1399 1399
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple of seven sorted lists of file names:
        (modified, added, removed, deleted, unknown, ignored, clean);
        the last three are only populated when the matching keyword
        argument is True.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to the files selected by 'match'
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            # 'cmp' holds files the dirstate could not decide about
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # recompute modified/added/clean by diffing the two manifests
            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    # drop handled entries so mf1 ends up holding removals
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            # fold each subrepo's status into the seven result lists,
            # prefixing file names with the subrepo path
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1552 1552
1553 1553 def heads(self, start=None):
1554 1554 heads = self.changelog.heads(start)
1555 1555 # sort the output in rev descending order
1556 1556 return sorted(heads, key=self.changelog.rev, reverse=True)
1557 1557
1558 1558 def branchheads(self, branch=None, start=None, closed=False):
1559 1559 '''return a (possibly filtered) list of heads for the given branch
1560 1560
1561 1561 Heads are returned in topological order, from newest to oldest.
1562 1562 If branch is None, use the dirstate branch.
1563 1563 If start is not None, return only heads reachable from start.
1564 1564 If closed is True, return heads that are marked as closed as well.
1565 1565 '''
1566 1566 if branch is None:
1567 1567 branch = self[None].branch()
1568 1568 branches = self.branchmap()
1569 1569 if branch not in branches:
1570 1570 return []
1571 1571 # the cache returns heads ordered lowest to highest
1572 1572 bheads = list(reversed(branches[branch]))
1573 1573 if start is not None:
1574 1574 # filter out the heads that cannot be reached from startrev
1575 1575 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1576 1576 bheads = [h for h in bheads if h in fbheads]
1577 1577 if not closed:
1578 1578 bheads = [h for h in bheads if not self[h].closesbranch()]
1579 1579 return bheads
1580 1580
1581 1581 def branches(self, nodes):
1582 1582 if not nodes:
1583 1583 nodes = [self.changelog.tip()]
1584 1584 b = []
1585 1585 for n in nodes:
1586 1586 t = n
1587 1587 while True:
1588 1588 p = self.changelog.parents(n)
1589 1589 if p[1] != nullid or p[0] == nullid:
1590 1590 b.append((t, n, p[0], p[1]))
1591 1591 break
1592 1592 n = p[0]
1593 1593 return b
1594 1594
1595 1595 def between(self, pairs):
1596 1596 r = []
1597 1597
1598 1598 for top, bottom in pairs:
1599 1599 n, l, i = top, [], 0
1600 1600 f = 1
1601 1601
1602 1602 while n != bottom and n != nullid:
1603 1603 p = self.changelog.parents(n)[0]
1604 1604 if i == f:
1605 1605 l.append(n)
1606 1606 f = f * 2
1607 1607 n = p
1608 1608 i += 1
1609 1609
1610 1610 r.append(l)
1611 1611
1612 1612 return r
1613 1613
    def pull(self, remote, heads=None, force=False):
        """Pull changes from remote into this repository.

        heads optionally limits the pull to ancestors of those nodes.
        Returns the value of addchangegroup(), or 0 when nothing was
        fetched.  Phase and obsolescence data are exchanged even when
        no changesets are transferred.
        """
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the changegroup protocol the remote supports
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                      "other repository doesn't support "
                                      "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                # nodes that this pull just added to the changelog
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled every thing possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            # merge any obsolescence markers the remote exposes
            remoteobs = remote.listkeys('obsolete')
            if 'dump' in remoteobs:
                if tr is None:
                    tr = self.transaction(trname)
                data = base85.b85decode(remoteobs['dump'])
                self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result
1690 1690
1691 1691 def checkpush(self, force, revs):
1692 1692 """Extensions can override this function if additional checks have
1693 1693 to be performed before pushing, or call it if they override push
1694 1694 command.
1695 1695 """
1696 1696 pass
1697 1697
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # refuse pushes that would create new remote heads
                        # or new branches, unless forced
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))"
                    #              )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                     outgoing.commonheads,
                                     outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote by public here.
                    # XXX Beware that revset break if droots is not strictly
                    # XXX root we may want to ensure it is but it is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                # push obsolescence markers if both sides support them
                if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
                    data = self.listkeys('obsolete')['dump']
                    r = remote.pushkey('obsolete', 'dump', '', data)
                    if not r:
                        self.ui.warn(_('failed to push obsolete markers!\n'))
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        # advance remote bookmarks that we also carry, when the remote
        # value is an ancestor of our value
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1854 1854
1855 1855 def changegroupinfo(self, nodes, source):
1856 1856 if self.ui.verbose or source == 'bundle':
1857 1857 self.ui.status(_("%d changesets found\n") % len(nodes))
1858 1858 if self.ui.debugflag:
1859 1859 self.ui.debug("list of changesets:\n")
1860 1860 for node in nodes:
1861 1861 self.ui.debug("%s\n" % hex(node))
1862 1862
1863 1863 def changegroupsubset(self, bases, heads, source):
1864 1864 """Compute a changegroup consisting of all the nodes that are
1865 1865 descendants of any of the bases and ancestors of any of the heads.
1866 1866 Return a chunkbuffer object whose read() method will return
1867 1867 successive changegroup chunks.
1868 1868
1869 1869 It is fairly complex as determining which filenodes and which
1870 1870 manifest nodes need to be included for the changeset to be complete
1871 1871 is non-trivial.
1872 1872
1873 1873 Another wrinkle is doing the reverse, figuring out which changeset in
1874 1874 the changegroup a particular filenode or manifestnode belongs to.
1875 1875 """
1876 1876 cl = self.changelog
1877 1877 if not bases:
1878 1878 bases = [nullid]
1879 1879 csets, bases, heads = cl.nodesbetween(bases, heads)
1880 1880 # We assume that all ancestors of bases are known
1881 1881 common = set(cl.ancestors([cl.rev(n) for n in bases]))
1882 1882 return self._changegroupsubset(common, csets, heads, source)
1883 1883
1884 1884 def getlocalbundle(self, source, outgoing):
1885 1885 """Like getbundle, but taking a discovery.outgoing as an argument.
1886 1886
1887 1887 This is only implemented for local repos and reuses potentially
1888 1888 precomputed sets in outgoing."""
1889 1889 if not outgoing.missing:
1890 1890 return None
1891 1891 return self._changegroupsubset(outgoing.common,
1892 1892 outgoing.missing,
1893 1893 outgoing.missingheads,
1894 1894 source)
1895 1895
1896 1896 def getbundle(self, source, heads=None, common=None):
1897 1897 """Like changegroupsubset, but returns the set difference between the
1898 1898 ancestors of heads and the ancestors common.
1899 1899
1900 1900 If heads is None, use the local heads. If common is None, use [nullid].
1901 1901
1902 1902 The nodes in common might not all be known locally due to the way the
1903 1903 current discovery protocol works.
1904 1904 """
1905 1905 cl = self.changelog
1906 1906 if common:
1907 1907 nm = cl.nodemap
1908 1908 common = [n for n in common if n in nm]
1909 1909 else:
1910 1910 common = [nullid]
1911 1911 if not heads:
1912 1912 heads = cl.heads()
1913 1913 return self.getlocalbundle(source,
1914 1914 discovery.outgoing(cl, common, heads))
1915 1915
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Build a changegroup holding csets, assuming the receiver
        already has the changelog revisions in commonrevs.

        Returns a changegroup.unbundle10 wrapping a lazy chunk generator.
        When heads covers every local head, delegates to the simpler
        _changegroup() fast path.
        """

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        # mutable state shared with the lookup callback:
        # fstate = [current filename, {filenode -> owning clnode}]
        fstate = ['', {}]
        count = [0, 0]   # [items done, items total] for progress output

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            # map a node to its owning changelog node, and collect the
            # manifests/filenodes needed by later phases as a side effect
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2022 2022
2023 2023 def changegroup(self, basenodes, source):
2024 2024 # to avoid a race we use changegroupsubset() (issue1320)
2025 2025 return self.changegroupsubset(basenodes, self.heads(), source)
2026 2026
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}               # manifest node -> owning changelog node
        changedfiles = set()   # files touched by any sent changeset
        fstate = ['']          # current filename, shared with lookup()
        count = [0, 0]         # [items done, items total] for progress

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        # all nodes of a revlog whose linkrev is among the sent changesets
        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            # map a node to its owning changelog node; for changelog
            # entries also record needed manifests and changed files
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2119 2119
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # report each incoming changeset; return its future revision
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            # weak proxy so the transaction can be garbage collected once
            # released, despite references held by the revlogs
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                # per-chunk progress callback attached to the bundle source
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            # count distinct files touched, for the file progress total
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                # tick off file nodes we were told to expect (server.validate)
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # anything still listed in needfiles must already exist locally
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            # compute the head-count delta, ignoring heads that close a branch
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                # let pretxnchangegroup hooks see the pending changelog
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.

                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
2310 2310
    def stream_in(self, remote, requirements):
        """Clone by copying raw store files streamed from 'remote'.

        requirements is the set of format requirements to adopt from the
        streamed-in repository. Returns len(self.heads()) + 1, matching
        the "heads plus one, never zero" convention of addchangegroup().
        """
        lock = self.lock()
        try:
            fp = remote.stream_out()
            # first line of the stream: a numeric status code
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            # second line: "<total files> <total bytes>"
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                # per-file header: "<name>\0<size>"
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                # guard against a zero/negative clock delta before dividing
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            # drop cached state so the freshly copied store is visible
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
2378 2378
2379 2379 def clone(self, remote, heads=[], stream=False):
2380 2380 '''clone remote repository.
2381 2381
2382 2382 keyword arguments:
2383 2383 heads: list of revs to clone (forces use of pull)
2384 2384 stream: use streaming clone if possible'''
2385 2385
2386 2386 # now, all clients that can request uncompressed clones can
2387 2387 # read repo formats supported by all servers that can serve
2388 2388 # them.
2389 2389
2390 2390 # if revlog format changes, client will have to check version
2391 2391 # and format flags on "stream" capability, and use
2392 2392 # uncompressed only if compatible.
2393 2393
2394 2394 if not stream:
2395 2395 # if the server explicitely prefer to stream (for fast LANs)
2396 2396 stream = remote.capable('stream-preferred')
2397 2397
2398 2398 if stream and not heads:
2399 2399 # 'stream' means remote revlog format is revlogv1 only
2400 2400 if remote.capable('stream'):
2401 2401 return self.stream_in(remote, set(('revlogv1',)))
2402 2402 # otherwise, 'streamreqs' contains the remote revlog format
2403 2403 streamreqs = remote.capable('streamreqs')
2404 2404 if streamreqs:
2405 2405 streamreqs = set(streamreqs.split(','))
2406 2406 # if we support it, stream in and adjust our requirements
2407 2407 if not streamreqs - self.supportedformats:
2408 2408 return self.stream_in(remote, streamreqs)
2409 2409 return self.pull(remote, heads)
2410 2410
2411 2411 def pushkey(self, namespace, key, old, new):
2412 2412 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2413 2413 old=old, new=new)
2414 2414 ret = pushkey.push(self, namespace, key, old, new)
2415 2415 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2416 2416 ret=ret)
2417 2417 return ret
2418 2418
2419 2419 def listkeys(self, namespace):
2420 2420 self.hook('prelistkeys', throw=True, namespace=namespace)
2421 2421 values = pushkey.list(self, namespace)
2422 2422 self.hook('listkeys', namespace=namespace, values=values)
2423 2423 return values
2424 2424
2425 2425 def debugwireargs(self, one, two, three=None, four=None, five=None):
2426 2426 '''used to test argument passing over the wire'''
2427 2427 return "%s %s %s %s %s" % (one, two, three, four, five)
2428 2428
2429 2429 def savecommitmessage(self, text):
2430 2430 fp = self.opener('last-message.txt', 'wb')
2431 2431 try:
2432 2432 fp.write(text)
2433 2433 finally:
2434 2434 fp.close()
2435 2435 return self.pathto(fp.name[len(self.root)+1:])
2436 2436
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The sequence is copied eagerly so the callback keeps no reference
    to the caller's (possibly mutable) container."""
    pending = [tuple(t) for t in files]
    def renameall():
        for src, dest in pending:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
2447 2447
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    directory, basename = os.path.split(fn)
    assert basename.startswith('journal')
    # swap the 'journal' prefix for 'undo', keeping any suffix
    return os.path.join(directory, 'undo' + basename[len('journal'):])
2452 2452
def instance(ui, path, create):
    """Open (or create) the local repository at 'path' (plain path or
    file:// URL)."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2455 2455
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,603 +1,603
1 1 Setting up test
2 2
3 3 $ hg init test
4 4 $ cd test
5 5 $ echo 0 > afile
6 6 $ hg add afile
7 7 $ hg commit -m "0.0"
8 8 $ echo 1 >> afile
9 9 $ hg commit -m "0.1"
10 10 $ echo 2 >> afile
11 11 $ hg commit -m "0.2"
12 12 $ echo 3 >> afile
13 13 $ hg commit -m "0.3"
14 14 $ hg update -C 0
15 15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 16 $ echo 1 >> afile
17 17 $ hg commit -m "1.1"
18 18 created new head
19 19 $ echo 2 >> afile
20 20 $ hg commit -m "1.2"
21 21 $ echo "a line" > fred
22 22 $ echo 3 >> afile
23 23 $ hg add fred
24 24 $ hg commit -m "1.3"
25 25 $ hg mv afile adifferentfile
26 26 $ hg commit -m "1.3m"
27 27 $ hg update -C 3
28 28 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
29 29 $ hg mv afile anotherfile
30 30 $ hg commit -m "0.3m"
31 31 $ hg verify
32 32 checking changesets
33 33 checking manifests
34 34 crosschecking files in changesets and manifests
35 35 checking files
36 36 4 files, 9 changesets, 7 total revisions
37 37 $ cd ..
38 38 $ hg init empty
39 39
40 40 Bundle and phase
41 41
42 42 $ hg -R test phase --force --secret 0
43 43 $ hg -R test bundle phase.hg empty
44 44 searching for changes
45 45 no changes found (ignored 9 secret changesets)
46 46 [1]
47 47 $ hg -R test phase --draft -r 'head()'
48 48
49 49 Bundle --all
50 50
51 51 $ hg -R test bundle --all all.hg
52 52 9 changesets found
53 53
54 54 Bundle test to full.hg
55 55
56 56 $ hg -R test bundle full.hg empty
57 57 searching for changes
58 58 9 changesets found
59 59
60 60 Unbundle full.hg in test
61 61
62 62 $ hg -R test unbundle full.hg
63 63 adding changesets
64 64 adding manifests
65 65 adding file changes
66 66 added 0 changesets with 0 changes to 4 files
67 67 (run 'hg update' to get a working copy)
68 68
69 69 Verify empty
70 70
71 71 $ hg -R empty heads
72 72 [1]
73 73 $ hg -R empty verify
74 74 checking changesets
75 75 checking manifests
76 76 crosschecking files in changesets and manifests
77 77 checking files
78 78 0 files, 0 changesets, 0 total revisions
79 79
80 80 Pull full.hg into test (using --cwd)
81 81
82 82 $ hg --cwd test pull ../full.hg
83 83 pulling from ../full.hg
84 84 searching for changes
85 85 no changes found
86 86
87 87 Pull full.hg into empty (using --cwd)
88 88
89 89 $ hg --cwd empty pull ../full.hg
90 90 pulling from ../full.hg
91 91 requesting all changes
92 92 adding changesets
93 93 adding manifests
94 94 adding file changes
95 95 added 9 changesets with 7 changes to 4 files (+1 heads)
96 96 (run 'hg heads' to see heads, 'hg merge' to merge)
97 97
98 98 Rollback empty
99 99
100 100 $ hg -R empty rollback
101 101 repository tip rolled back to revision -1 (undo pull)
102 102
103 103 Pull full.hg into empty again (using --cwd)
104 104
105 105 $ hg --cwd empty pull ../full.hg
106 106 pulling from ../full.hg
107 107 requesting all changes
108 108 adding changesets
109 109 adding manifests
110 110 adding file changes
111 111 added 9 changesets with 7 changes to 4 files (+1 heads)
112 112 (run 'hg heads' to see heads, 'hg merge' to merge)
113 113
114 114 Pull full.hg into test (using -R)
115 115
116 116 $ hg -R test pull full.hg
117 117 pulling from full.hg
118 118 searching for changes
119 119 no changes found
120 120
121 121 Pull full.hg into empty (using -R)
122 122
123 123 $ hg -R empty pull full.hg
124 124 pulling from full.hg
125 125 searching for changes
126 126 no changes found
127 127
128 128 Rollback empty
129 129
130 130 $ hg -R empty rollback
131 131 repository tip rolled back to revision -1 (undo pull)
132 132
133 133 Pull full.hg into empty again (using -R)
134 134
135 135 $ hg -R empty pull full.hg
136 136 pulling from full.hg
137 137 requesting all changes
138 138 adding changesets
139 139 adding manifests
140 140 adding file changes
141 141 added 9 changesets with 7 changes to 4 files (+1 heads)
142 142 (run 'hg heads' to see heads, 'hg merge' to merge)
143 143
144 144 Log -R full.hg in fresh empty
145 145
146 146 $ rm -r empty
147 147 $ hg init empty
148 148 $ cd empty
149 149 $ hg -R bundle://../full.hg log
150 150 changeset: 8:aa35859c02ea
151 151 tag: tip
152 152 parent: 3:eebf5a27f8ca
153 153 user: test
154 154 date: Thu Jan 01 00:00:00 1970 +0000
155 155 summary: 0.3m
156 156
157 157 changeset: 7:a6a34bfa0076
158 158 user: test
159 159 date: Thu Jan 01 00:00:00 1970 +0000
160 160 summary: 1.3m
161 161
162 162 changeset: 6:7373c1169842
163 163 user: test
164 164 date: Thu Jan 01 00:00:00 1970 +0000
165 165 summary: 1.3
166 166
167 167 changeset: 5:1bb50a9436a7
168 168 user: test
169 169 date: Thu Jan 01 00:00:00 1970 +0000
170 170 summary: 1.2
171 171
172 172 changeset: 4:095197eb4973
173 173 parent: 0:f9ee2f85a263
174 174 user: test
175 175 date: Thu Jan 01 00:00:00 1970 +0000
176 176 summary: 1.1
177 177
178 178 changeset: 3:eebf5a27f8ca
179 179 user: test
180 180 date: Thu Jan 01 00:00:00 1970 +0000
181 181 summary: 0.3
182 182
183 183 changeset: 2:e38ba6f5b7e0
184 184 user: test
185 185 date: Thu Jan 01 00:00:00 1970 +0000
186 186 summary: 0.2
187 187
188 188 changeset: 1:34c2bf6b0626
189 189 user: test
190 190 date: Thu Jan 01 00:00:00 1970 +0000
191 191 summary: 0.1
192 192
193 193 changeset: 0:f9ee2f85a263
194 194 user: test
195 195 date: Thu Jan 01 00:00:00 1970 +0000
196 196 summary: 0.0
197 197
198 198 Make sure bundlerepo doesn't leak tempfiles (issue2491)
199 199
200 200 $ ls .hg
201 201 00changelog.i
202 202 cache
203 203 requires
204 204 store
205 205
206 206 Pull ../full.hg into empty (with hook)
207 207
208 208 $ echo "[hooks]" >> .hg/hgrc
209 209 $ echo "changegroup = python \"$TESTDIR/printenv.py\" changegroup" >> .hg/hgrc
210 210
211 211 doesn't work (yet ?)
212 212
213 213 hg -R bundle://../full.hg verify
214 214
215 215 $ hg pull bundle://../full.hg
216 216 pulling from bundle:../full.hg
217 217 requesting all changes
218 218 adding changesets
219 219 adding manifests
220 220 adding file changes
221 221 added 9 changesets with 7 changes to 4 files (+1 heads)
222 222 changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_URL=bundle:../full.hg
223 223 (run 'hg heads' to see heads, 'hg merge' to merge)
224 224
225 225 Rollback empty
226 226
227 227 $ hg rollback
228 228 repository tip rolled back to revision -1 (undo pull)
229 229 $ cd ..
230 230
231 231 Log -R bundle:empty+full.hg
232 232
233 233 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
234 234 8 7 6 5 4 3 2 1 0
235 235
236 236 Pull full.hg into empty again (using -R; with hook)
237 237
238 238 $ hg -R empty pull full.hg
239 239 pulling from full.hg
240 240 requesting all changes
241 241 adding changesets
242 242 adding manifests
243 243 adding file changes
244 244 added 9 changesets with 7 changes to 4 files (+1 heads)
245 245 changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_URL=bundle:empty+full.hg
246 246 (run 'hg heads' to see heads, 'hg merge' to merge)
247 247
248 248 Create partial clones
249 249
250 250 $ rm -r empty
251 251 $ hg init empty
252 252 $ hg clone -r 3 test partial
253 253 adding changesets
254 254 adding manifests
255 255 adding file changes
256 256 added 4 changesets with 4 changes to 1 files
257 257 updating to branch default
258 258 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
259 259 $ hg clone partial partial2
260 260 updating to branch default
261 261 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
262 262 $ cd partial
263 263
264 264 Log -R full.hg in partial
265 265
266 266 $ hg -R bundle://../full.hg log
267 267 changeset: 8:aa35859c02ea
268 268 tag: tip
269 269 parent: 3:eebf5a27f8ca
270 270 user: test
271 271 date: Thu Jan 01 00:00:00 1970 +0000
272 272 summary: 0.3m
273 273
274 274 changeset: 7:a6a34bfa0076
275 275 user: test
276 276 date: Thu Jan 01 00:00:00 1970 +0000
277 277 summary: 1.3m
278 278
279 279 changeset: 6:7373c1169842
280 280 user: test
281 281 date: Thu Jan 01 00:00:00 1970 +0000
282 282 summary: 1.3
283 283
284 284 changeset: 5:1bb50a9436a7
285 285 user: test
286 286 date: Thu Jan 01 00:00:00 1970 +0000
287 287 summary: 1.2
288 288
289 289 changeset: 4:095197eb4973
290 290 parent: 0:f9ee2f85a263
291 291 user: test
292 292 date: Thu Jan 01 00:00:00 1970 +0000
293 293 summary: 1.1
294 294
295 295 changeset: 3:eebf5a27f8ca
296 296 user: test
297 297 date: Thu Jan 01 00:00:00 1970 +0000
298 298 summary: 0.3
299 299
300 300 changeset: 2:e38ba6f5b7e0
301 301 user: test
302 302 date: Thu Jan 01 00:00:00 1970 +0000
303 303 summary: 0.2
304 304
305 305 changeset: 1:34c2bf6b0626
306 306 user: test
307 307 date: Thu Jan 01 00:00:00 1970 +0000
308 308 summary: 0.1
309 309
310 310 changeset: 0:f9ee2f85a263
311 311 user: test
312 312 date: Thu Jan 01 00:00:00 1970 +0000
313 313 summary: 0.0
314 314
315 315
316 316 Incoming full.hg in partial
317 317
318 318 $ hg incoming bundle://../full.hg
319 319 comparing with bundle:../full.hg
320 320 searching for changes
321 321 changeset: 4:095197eb4973
322 322 parent: 0:f9ee2f85a263
323 323 user: test
324 324 date: Thu Jan 01 00:00:00 1970 +0000
325 325 summary: 1.1
326 326
327 327 changeset: 5:1bb50a9436a7
328 328 user: test
329 329 date: Thu Jan 01 00:00:00 1970 +0000
330 330 summary: 1.2
331 331
332 332 changeset: 6:7373c1169842
333 333 user: test
334 334 date: Thu Jan 01 00:00:00 1970 +0000
335 335 summary: 1.3
336 336
337 337 changeset: 7:a6a34bfa0076
338 338 user: test
339 339 date: Thu Jan 01 00:00:00 1970 +0000
340 340 summary: 1.3m
341 341
342 342 changeset: 8:aa35859c02ea
343 343 tag: tip
344 344 parent: 3:eebf5a27f8ca
345 345 user: test
346 346 date: Thu Jan 01 00:00:00 1970 +0000
347 347 summary: 0.3m
348 348
349 349
350 350 Outgoing -R full.hg vs partial2 in partial
351 351
352 352 $ hg -R bundle://../full.hg outgoing ../partial2
353 353 comparing with ../partial2
354 354 searching for changes
355 355 changeset: 4:095197eb4973
356 356 parent: 0:f9ee2f85a263
357 357 user: test
358 358 date: Thu Jan 01 00:00:00 1970 +0000
359 359 summary: 1.1
360 360
361 361 changeset: 5:1bb50a9436a7
362 362 user: test
363 363 date: Thu Jan 01 00:00:00 1970 +0000
364 364 summary: 1.2
365 365
366 366 changeset: 6:7373c1169842
367 367 user: test
368 368 date: Thu Jan 01 00:00:00 1970 +0000
369 369 summary: 1.3
370 370
371 371 changeset: 7:a6a34bfa0076
372 372 user: test
373 373 date: Thu Jan 01 00:00:00 1970 +0000
374 374 summary: 1.3m
375 375
376 376 changeset: 8:aa35859c02ea
377 377 tag: tip
378 378 parent: 3:eebf5a27f8ca
379 379 user: test
380 380 date: Thu Jan 01 00:00:00 1970 +0000
381 381 summary: 0.3m
382 382
383 383
384 384 Outgoing -R does-not-exist.hg vs partial2 in partial
385 385
386 386 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
387 387 abort: *../does-not-exist.hg* (glob)
388 388 [255]
389 389 $ cd ..
390 390
391 391 hide outer repo
392 392 $ hg init
393 393
394 394 Direct clone from bundle (all-history)
395 395
396 396 $ hg clone full.hg full-clone
397 397 requesting all changes
398 398 adding changesets
399 399 adding manifests
400 400 adding file changes
401 401 added 9 changesets with 7 changes to 4 files (+1 heads)
402 402 updating to branch default
403 403 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
404 404 $ hg -R full-clone heads
405 405 changeset: 8:aa35859c02ea
406 406 tag: tip
407 407 parent: 3:eebf5a27f8ca
408 408 user: test
409 409 date: Thu Jan 01 00:00:00 1970 +0000
410 410 summary: 0.3m
411 411
412 412 changeset: 7:a6a34bfa0076
413 413 user: test
414 414 date: Thu Jan 01 00:00:00 1970 +0000
415 415 summary: 1.3m
416 416
417 417 $ rm -r full-clone
418 418
419 419 When cloning from a non-copiable repository into '', do not
420 420 recurse infinitely (issue 2528)
421 421
422 422 $ hg clone full.hg ''
423 abort: * (glob)
423 abort: empty destination path is not valid
424 424 [255]
425 425
426 426 test for http://mercurial.selenic.com/bts/issue216
427 427
428 428 Unbundle incremental bundles into fresh empty in one go
429 429
430 430 $ rm -r empty
431 431 $ hg init empty
432 432 $ hg -R test bundle --base null -r 0 ../0.hg
433 433 1 changesets found
434 434 $ hg -R test bundle --base 0 -r 1 ../1.hg
435 435 1 changesets found
436 436 $ hg -R empty unbundle -u ../0.hg ../1.hg
437 437 adding changesets
438 438 adding manifests
439 439 adding file changes
440 440 added 1 changesets with 1 changes to 1 files
441 441 adding changesets
442 442 adding manifests
443 443 adding file changes
444 444 added 1 changesets with 1 changes to 1 files
445 445 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
446 446
447 447 test for 540d1059c802
448 448
449 449 test for 540d1059c802
450 450
451 451 $ hg init orig
452 452 $ cd orig
453 453 $ echo foo > foo
454 454 $ hg add foo
455 455 $ hg ci -m 'add foo'
456 456
457 457 $ hg clone . ../copy
458 458 updating to branch default
459 459 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
460 460 $ hg tag foo
461 461
462 462 $ cd ../copy
463 463 $ echo >> foo
464 464 $ hg ci -m 'change foo'
465 465 $ hg bundle ../bundle.hg ../orig
466 466 searching for changes
467 467 1 changesets found
468 468
469 469 $ cd ../orig
470 470 $ hg incoming ../bundle.hg
471 471 comparing with ../bundle.hg
472 472 searching for changes
473 473 changeset: 2:ed1b79f46b9a
474 474 tag: tip
475 475 parent: 0:bbd179dfa0a7
476 476 user: test
477 477 date: Thu Jan 01 00:00:00 1970 +0000
478 478 summary: change foo
479 479
480 480 $ cd ..
481 481
482 482 test bundle with # in the filename (issue2154):
483 483
484 484 $ cp bundle.hg 'test#bundle.hg'
485 485 $ cd orig
486 486 $ hg incoming '../test#bundle.hg'
487 487 comparing with ../test
488 488 abort: unknown revision 'bundle.hg'!
489 489 [255]
490 490
491 491 note that percent encoding is not handled:
492 492
493 493 $ hg incoming ../test%23bundle.hg
494 494 abort: repository ../test%23bundle.hg not found!
495 495 [255]
496 496 $ cd ..
497 497
498 498 test for http://mercurial.selenic.com/bts/issue1144
499 499
500 500 test that verify bundle does not traceback
501 501
 502 502 partial history bundle, fails w/ unknown parent
503 503
504 504 $ hg -R bundle.hg verify
505 505 abort: 00changelog.i@bbd179dfa0a7: unknown parent!
506 506 [255]
507 507
508 508 full history bundle, refuses to verify non-local repo
509 509
510 510 $ hg -R all.hg verify
511 511 abort: cannot verify bundle or remote repos
512 512 [255]
513 513
514 514 but, regular verify must continue to work
515 515
516 516 $ hg -R orig verify
517 517 checking changesets
518 518 checking manifests
519 519 crosschecking files in changesets and manifests
520 520 checking files
521 521 2 files, 2 changesets, 2 total revisions
522 522
523 523 diff against bundle
524 524
525 525 $ hg init b
526 526 $ cd b
527 527 $ hg -R ../all.hg diff -r tip
528 528 diff -r aa35859c02ea anotherfile
529 529 --- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
530 530 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
531 531 @@ -1,4 +0,0 @@
532 532 -0
533 533 -1
534 534 -2
535 535 -3
536 536 $ cd ..
537 537
538 538 bundle single branch
539 539
540 540 $ hg init branchy
541 541 $ cd branchy
542 542 $ echo a >a
543 543 $ echo x >x
544 544 $ hg ci -Ama
545 545 adding a
546 546 adding x
547 547 $ echo c >c
548 548 $ echo xx >x
549 549 $ hg ci -Amc
550 550 adding c
551 551 $ echo c1 >c1
552 552 $ hg ci -Amc1
553 553 adding c1
554 554 $ hg up 0
555 555 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
556 556 $ echo b >b
557 557 $ hg ci -Amb
558 558 adding b
559 559 created new head
560 560 $ echo b1 >b1
561 561 $ echo xx >x
562 562 $ hg ci -Amb1
563 563 adding b1
564 564 $ hg clone -q -r2 . part
565 565
566 566 == bundling via incoming
567 567
568 568 $ hg in -R part --bundle incoming.hg --template "{node}\n" .
569 569 comparing with .
570 570 searching for changes
571 571 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
572 572 057f4db07f61970e1c11e83be79e9d08adc4dc31
573 573
574 574 == bundling
575 575
576 576 $ hg bundle bundle.hg part --debug
577 577 query 1; heads
578 578 searching for changes
579 579 all remote heads known locally
580 580 2 changesets found
581 581 list of changesets:
582 582 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
583 583 057f4db07f61970e1c11e83be79e9d08adc4dc31
584 584 bundling: 1/2 changesets (50.00%)
585 585 bundling: 2/2 changesets (100.00%)
586 586 bundling: 1/2 manifests (50.00%)
587 587 bundling: 2/2 manifests (100.00%)
588 588 bundling: b 1/3 files (33.33%)
589 589 bundling: b1 2/3 files (66.67%)
590 590 bundling: x 3/3 files (100.00%)
591 591
592 592 == Test for issue3441
593 593
594 594 $ hg clone -q -r0 . part2
595 595 $ hg -q -R part2 pull bundle.hg
596 596 $ hg -R part2 verify
597 597 checking changesets
598 598 checking manifests
599 599 crosschecking files in changesets and manifests
600 600 checking files
601 601 4 files, 3 changesets, 5 total revisions
602 602
603 603 $ cd ..
@@ -1,565 +1,565
1 1 Prepare repo a:
2 2
3 3 $ hg init a
4 4 $ cd a
5 5 $ echo a > a
6 6 $ hg add a
7 7 $ hg commit -m test
8 8 $ echo first line > b
9 9 $ hg add b
10 10
11 11 Create a non-inlined filelog:
12 12
13 13 $ python -c 'file("data1", "wb").write("".join("%s\n" % x for x in range(10000)))'
14 14 $ for j in 0 1 2 3 4 5 6 7 8 9; do
15 15 > cat data1 >> b
16 16 > hg commit -m test
17 17 > done
18 18
19 19 List files in store/data (should show a 'b.d'):
20 20
21 21 $ for i in .hg/store/data/*; do
22 22 > echo $i
23 23 > done
24 24 .hg/store/data/a.i
25 25 .hg/store/data/b.d
26 26 .hg/store/data/b.i
27 27
28 28 Default operation:
29 29
30 30 $ hg clone . ../b
31 31 updating to branch default
32 32 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
33 33 $ cd ../b
34 34 $ cat a
35 35 a
36 36 $ hg verify
37 37 checking changesets
38 38 checking manifests
39 39 crosschecking files in changesets and manifests
40 40 checking files
41 41 2 files, 11 changesets, 11 total revisions
42 42
43 43 Invalid dest '' must abort:
44 44
45 45 $ hg clone . ''
46 abort: * (glob)
46 abort: empty destination path is not valid
47 47 [255]
48 48
49 49 No update, with debug option:
50 50
51 51 #if hardlink
52 52 $ hg --debug clone -U . ../c
53 53 linked 8 files
54 54 #else
55 55 $ hg --debug clone -U . ../c
56 56 copied 8 files
57 57 #endif
58 58 $ cd ../c
59 59 $ cat a 2>/dev/null || echo "a not present"
60 60 a not present
61 61 $ hg verify
62 62 checking changesets
63 63 checking manifests
64 64 crosschecking files in changesets and manifests
65 65 checking files
66 66 2 files, 11 changesets, 11 total revisions
67 67
68 68 Default destination:
69 69
70 70 $ mkdir ../d
71 71 $ cd ../d
72 72 $ hg clone ../a
73 73 destination directory: a
74 74 updating to branch default
75 75 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
76 76 $ cd a
77 77 $ hg cat a
78 78 a
79 79 $ cd ../..
80 80
81 81 Check that we drop the 'file:' from the path before writing the .hgrc:
82 82
83 83 $ hg clone file:a e
84 84 updating to branch default
85 85 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
86 86 $ grep 'file:' e/.hg/hgrc
87 87 [1]
88 88
89 89 Check that path aliases are expanded:
90 90
91 91 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
92 92 $ hg -R f showconfig paths.default
93 93 $TESTTMP/a#0 (glob)
94 94
95 95 Use --pull:
96 96
97 97 $ hg clone --pull a g
98 98 requesting all changes
99 99 adding changesets
100 100 adding manifests
101 101 adding file changes
102 102 added 11 changesets with 11 changes to 2 files
103 103 updating to branch default
104 104 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
105 105 $ hg -R g verify
106 106 checking changesets
107 107 checking manifests
108 108 crosschecking files in changesets and manifests
109 109 checking files
110 110 2 files, 11 changesets, 11 total revisions
111 111
112 112 Invalid dest '' with --pull must abort (issue2528):
113 113
114 114 $ hg clone --pull a ''
115 abort: * (glob)
115 abort: empty destination path is not valid
116 116 [255]
117 117
118 118 Clone to '.':
119 119
120 120 $ mkdir h
121 121 $ cd h
122 122 $ hg clone ../a .
123 123 updating to branch default
124 124 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
125 125 $ cd ..
126 126
127 127
128 128 *** Tests for option -u ***
129 129
130 130 Adding some more history to repo a:
131 131
132 132 $ cd a
133 133 $ hg tag ref1
134 134 $ echo the quick brown fox >a
135 135 $ hg ci -m "hacked default"
136 136 $ hg up ref1
137 137 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
138 138 $ hg branch stable
139 139 marked working directory as branch stable
140 140 (branches are permanent and global, did you want a bookmark?)
141 141 $ echo some text >a
142 142 $ hg ci -m "starting branch stable"
143 143 $ hg tag ref2
144 144 $ echo some more text >a
145 145 $ hg ci -m "another change for branch stable"
146 146 $ hg up ref2
147 147 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
148 148 $ hg parents
149 149 changeset: 13:e8ece76546a6
150 150 branch: stable
151 151 tag: ref2
152 152 parent: 10:a7949464abda
153 153 user: test
154 154 date: Thu Jan 01 00:00:00 1970 +0000
155 155 summary: starting branch stable
156 156
157 157
158 158 Repo a has two heads:
159 159
160 160 $ hg heads
161 161 changeset: 15:0aae7cf88f0d
162 162 branch: stable
163 163 tag: tip
164 164 user: test
165 165 date: Thu Jan 01 00:00:00 1970 +0000
166 166 summary: another change for branch stable
167 167
168 168 changeset: 12:f21241060d6a
169 169 user: test
170 170 date: Thu Jan 01 00:00:00 1970 +0000
171 171 summary: hacked default
172 172
173 173
174 174 $ cd ..
175 175
176 176
177 177 Testing --noupdate with --updaterev (must abort):
178 178
179 179 $ hg clone --noupdate --updaterev 1 a ua
180 180 abort: cannot specify both --noupdate and --updaterev
181 181 [255]
182 182
183 183
184 184 Testing clone -u:
185 185
186 186 $ hg clone -u . a ua
187 187 updating to branch stable
188 188 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
189 189
190 190 Repo ua has both heads:
191 191
192 192 $ hg -R ua heads
193 193 changeset: 15:0aae7cf88f0d
194 194 branch: stable
195 195 tag: tip
196 196 user: test
197 197 date: Thu Jan 01 00:00:00 1970 +0000
198 198 summary: another change for branch stable
199 199
200 200 changeset: 12:f21241060d6a
201 201 user: test
202 202 date: Thu Jan 01 00:00:00 1970 +0000
203 203 summary: hacked default
204 204
205 205
206 206 Same revision checked out in repo a and ua:
207 207
208 208 $ hg -R a parents --template "{node|short}\n"
209 209 e8ece76546a6
210 210 $ hg -R ua parents --template "{node|short}\n"
211 211 e8ece76546a6
212 212
213 213 $ rm -r ua
214 214
215 215
216 216 Testing clone --pull -u:
217 217
218 218 $ hg clone --pull -u . a ua
219 219 requesting all changes
220 220 adding changesets
221 221 adding manifests
222 222 adding file changes
223 223 added 16 changesets with 16 changes to 3 files (+1 heads)
224 224 updating to branch stable
225 225 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
226 226
227 227 Repo ua has both heads:
228 228
229 229 $ hg -R ua heads
230 230 changeset: 15:0aae7cf88f0d
231 231 branch: stable
232 232 tag: tip
233 233 user: test
234 234 date: Thu Jan 01 00:00:00 1970 +0000
235 235 summary: another change for branch stable
236 236
237 237 changeset: 12:f21241060d6a
238 238 user: test
239 239 date: Thu Jan 01 00:00:00 1970 +0000
240 240 summary: hacked default
241 241
242 242
243 243 Same revision checked out in repo a and ua:
244 244
245 245 $ hg -R a parents --template "{node|short}\n"
246 246 e8ece76546a6
247 247 $ hg -R ua parents --template "{node|short}\n"
248 248 e8ece76546a6
249 249
250 250 $ rm -r ua
251 251
252 252
253 253 Testing clone -u <branch>:
254 254
255 255 $ hg clone -u stable a ua
256 256 updating to branch stable
257 257 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
258 258
259 259 Repo ua has both heads:
260 260
261 261 $ hg -R ua heads
262 262 changeset: 15:0aae7cf88f0d
263 263 branch: stable
264 264 tag: tip
265 265 user: test
266 266 date: Thu Jan 01 00:00:00 1970 +0000
267 267 summary: another change for branch stable
268 268
269 269 changeset: 12:f21241060d6a
270 270 user: test
271 271 date: Thu Jan 01 00:00:00 1970 +0000
272 272 summary: hacked default
273 273
274 274
275 275 Branch 'stable' is checked out:
276 276
277 277 $ hg -R ua parents
278 278 changeset: 15:0aae7cf88f0d
279 279 branch: stable
280 280 tag: tip
281 281 user: test
282 282 date: Thu Jan 01 00:00:00 1970 +0000
283 283 summary: another change for branch stable
284 284
285 285
286 286 $ rm -r ua
287 287
288 288
289 289 Testing default checkout:
290 290
291 291 $ hg clone a ua
292 292 updating to branch default
293 293 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
294 294
295 295 Repo ua has both heads:
296 296
297 297 $ hg -R ua heads
298 298 changeset: 15:0aae7cf88f0d
299 299 branch: stable
300 300 tag: tip
301 301 user: test
302 302 date: Thu Jan 01 00:00:00 1970 +0000
303 303 summary: another change for branch stable
304 304
305 305 changeset: 12:f21241060d6a
306 306 user: test
307 307 date: Thu Jan 01 00:00:00 1970 +0000
308 308 summary: hacked default
309 309
310 310
311 311 Branch 'default' is checked out:
312 312
313 313 $ hg -R ua parents
314 314 changeset: 12:f21241060d6a
315 315 user: test
316 316 date: Thu Jan 01 00:00:00 1970 +0000
317 317 summary: hacked default
318 318
319 319
320 320 $ rm -r ua
321 321
322 322
323 323 Testing #<branch>:
324 324
325 325 $ hg clone -u . a#stable ua
326 326 adding changesets
327 327 adding manifests
328 328 adding file changes
329 329 added 14 changesets with 14 changes to 3 files
330 330 updating to branch stable
331 331 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
332 332
333 333 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
334 334
335 335 $ hg -R ua heads
336 336 changeset: 13:0aae7cf88f0d
337 337 branch: stable
338 338 tag: tip
339 339 user: test
340 340 date: Thu Jan 01 00:00:00 1970 +0000
341 341 summary: another change for branch stable
342 342
343 343 changeset: 10:a7949464abda
344 344 user: test
345 345 date: Thu Jan 01 00:00:00 1970 +0000
346 346 summary: test
347 347
348 348
349 349 Same revision checked out in repo a and ua:
350 350
351 351 $ hg -R a parents --template "{node|short}\n"
352 352 e8ece76546a6
353 353 $ hg -R ua parents --template "{node|short}\n"
354 354 e8ece76546a6
355 355
356 356 $ rm -r ua
357 357
358 358
359 359 Testing -u -r <branch>:
360 360
361 361 $ hg clone -u . -r stable a ua
362 362 adding changesets
363 363 adding manifests
364 364 adding file changes
365 365 added 14 changesets with 14 changes to 3 files
366 366 updating to branch stable
367 367 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
368 368
369 369 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
370 370
371 371 $ hg -R ua heads
372 372 changeset: 13:0aae7cf88f0d
373 373 branch: stable
374 374 tag: tip
375 375 user: test
376 376 date: Thu Jan 01 00:00:00 1970 +0000
377 377 summary: another change for branch stable
378 378
379 379 changeset: 10:a7949464abda
380 380 user: test
381 381 date: Thu Jan 01 00:00:00 1970 +0000
382 382 summary: test
383 383
384 384
385 385 Same revision checked out in repo a and ua:
386 386
387 387 $ hg -R a parents --template "{node|short}\n"
388 388 e8ece76546a6
389 389 $ hg -R ua parents --template "{node|short}\n"
390 390 e8ece76546a6
391 391
392 392 $ rm -r ua
393 393
394 394
395 395 Testing -r <branch>:
396 396
397 397 $ hg clone -r stable a ua
398 398 adding changesets
399 399 adding manifests
400 400 adding file changes
401 401 added 14 changesets with 14 changes to 3 files
402 402 updating to branch stable
403 403 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
404 404
405 405 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
406 406
407 407 $ hg -R ua heads
408 408 changeset: 13:0aae7cf88f0d
409 409 branch: stable
410 410 tag: tip
411 411 user: test
412 412 date: Thu Jan 01 00:00:00 1970 +0000
413 413 summary: another change for branch stable
414 414
415 415 changeset: 10:a7949464abda
416 416 user: test
417 417 date: Thu Jan 01 00:00:00 1970 +0000
418 418 summary: test
419 419
420 420
421 421 Branch 'stable' is checked out:
422 422
423 423 $ hg -R ua parents
424 424 changeset: 13:0aae7cf88f0d
425 425 branch: stable
426 426 tag: tip
427 427 user: test
428 428 date: Thu Jan 01 00:00:00 1970 +0000
429 429 summary: another change for branch stable
430 430
431 431
432 432 $ rm -r ua
433 433
434 434
435 435 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
436 436 iterable in addbranchrevs()
437 437
438 438 $ cat <<EOF > simpleclone.py
439 439 > from mercurial import ui, hg
440 440 > myui = ui.ui()
441 441 > repo = hg.repository(myui, 'a')
442 442 > hg.clone(myui, {}, repo, dest="ua")
443 443 > EOF
444 444
445 445 $ python simpleclone.py
446 446 updating to branch default
447 447 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
448 448
449 449 $ rm -r ua
450 450
451 451 $ cat <<EOF > branchclone.py
452 452 > from mercurial import ui, hg, extensions
453 453 > myui = ui.ui()
454 454 > extensions.loadall(myui)
455 455 > repo = hg.repository(myui, 'a')
456 456 > hg.clone(myui, {}, repo, dest="ua", branch=["stable",])
457 457 > EOF
458 458
459 459 $ python branchclone.py
460 460 adding changesets
461 461 adding manifests
462 462 adding file changes
463 463 added 14 changesets with 14 changes to 3 files
464 464 updating to branch stable
465 465 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
466 466 $ rm -r ua
467 467
468 468
469 469 Testing failures:
470 470
471 471 $ mkdir fail
472 472 $ cd fail
473 473
474 474 No local source
475 475
476 476 $ hg clone a b
477 477 abort: repository a not found!
478 478 [255]
479 479
480 480 No remote source
481 481
482 482 $ hg clone http://127.0.0.1:3121/a b
483 483 abort: error: *refused* (glob)
484 484 [255]
485 485 $ rm -rf b # work around bug with http clone
486 486
487 487
488 488 #if unix-permissions
489 489
490 490 Inaccessible source
491 491
492 492 $ mkdir a
493 493 $ chmod 000 a
494 494 $ hg clone a b
495 495 abort: repository a not found!
496 496 [255]
497 497
498 498 Inaccessible destination
499 499
500 500 $ hg init b
501 501 $ cd b
502 502 $ hg clone . ../a
503 503 abort: Permission denied: ../a
504 504 [255]
505 505 $ cd ..
506 506 $ chmod 700 a
507 507 $ rm -r a b
508 508
509 509 #endif
510 510
511 511
512 512 #if fifo
513 513
514 514 Source of wrong type
515 515
516 516 $ mkfifo a
517 517 $ hg clone a b
518 518 abort: repository a not found!
519 519 [255]
520 520 $ rm a
521 521
522 522 #endif
523 523
524 524 Default destination, same directory
525 525
526 526 $ hg init q
527 527 $ hg clone q
528 528 destination directory: q
529 529 abort: destination 'q' is not empty
530 530 [255]
531 531
532 532 destination directory not empty
533 533
534 534 $ mkdir a
535 535 $ echo stuff > a/a
536 536 $ hg clone q a
537 537 abort: destination 'a' is not empty
538 538 [255]
539 539
540 540
541 541 #if unix-permissions
542 542
543 543 leave existing directory in place after clone failure
544 544
545 545 $ hg init c
546 546 $ cd c
547 547 $ echo c > c
548 548 $ hg commit -A -m test
549 549 adding c
550 550 $ chmod -rx .hg/store/data
551 551 $ cd ..
552 552 $ mkdir d
553 553 $ hg clone c d 2> err
554 554 [255]
555 555 $ test -d d
556 556 $ test -d d/.hg
557 557 [1]
558 558
559 559 reenable perm to allow deletion
560 560
561 561 $ chmod +rx c/.hg/store/data
562 562
563 563 #endif
564 564
565 565 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now