##// END OF EJS Templates
local-clone: also copy tags related caches...
marmoute -
r32495:a25cc3ca default
parent child Browse files
Show More
@@ -1,1061 +1,1064 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18
19 19 from . import (
20 20 bookmarks,
21 21 bundlerepo,
22 22 cmdutil,
23 23 destutil,
24 24 discovery,
25 25 error,
26 26 exchange,
27 27 extensions,
28 28 httppeer,
29 29 localrepo,
30 30 lock,
31 31 merge as mergemod,
32 32 node,
33 33 phases,
34 34 repoview,
35 35 scmutil,
36 36 sshpeer,
37 37 statichttprepo,
38 38 ui as uimod,
39 39 unionrepo,
40 40 url,
41 41 util,
42 42 verify as verifymod,
43 43 vfs as vfsmod,
44 44 )
45 45
# convenience alias: lock.release accepts several locks and releases the
# non-None ones
release = lock.release

# shared features
# name of the only share-able feature so far; postshare() writes it into
# the destination's '.hg/shared' file
sharedbookmarks = 'bookmarks'
50 50
def _local(path):
    """Return the module handling a local path.

    A path naming a regular file is assumed to be a bundle and is handled
    by ``bundlerepo``; anything else is handled by ``localrepo``.
    """
    localpath = util.expandpath(util.urllocalpath(path))
    if os.path.isfile(localpath):
        return bundlerepo
    return localrepo
54 54
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve requested branches into revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by
    parseurl().  Returns ``(revs, checkout)`` where ``checkout`` is the
    revision the caller should update to (or None).
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch requested: pass the caller's revs straight through
        x = revs or None
        if util.safehasattr(revs, 'first'):
            # revset-like objects expose first(); use it for the checkout rev
            y = revs.first()
        elif revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        # fall back to letting the remote resolve 'hashbranch' itself
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        # append the heads of 'branch' to 'revs'; False if branch unknown
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name: treat it as a plain revision identifier
            revs.append(hashbranch)
    return revs, revs[0]
97 97
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = util.url(path)
    if u.fragment:
        # the fragment names a branch; strip it from the URL proper
        branch, u.fragment = u.fragment, None
    else:
        branch = None
    return bytes(u), (branch, branches or [])
107 107
# map from URL scheme to the module (or callable) providing repo/peer
# instances for that scheme; resolution happens in _peerlookup()
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
117 117
def _peerlookup(path):
    """Return the scheme handler (module or factory result) for *path*.

    Unknown or missing schemes fall back to the 'file' handler.
    """
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        # factories like _local are called with the path and return the
        # module that should actually handle it
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing
130 130
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, str):
        # already a repo/peer object; ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # handler has no islocal() -> not a local scheme
        return False
139 139
def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path)
    return util.posixfile(pathurl.localpath(), 'rb')
147 147
# a list of (ui, repo) functions called for wire peer initialization;
# _peerorrepo() runs these on every non-local peer it creates
wirepeersetupfuncs = []
150 150
def _peerorrepo(ui, path, create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    # prefer the object's own ui if it has one (may carry repo-level config)
    ui = getattr(obj, "ui", ui)
    # caller-supplied setup hooks run before extension reposetup hooks
    for f in presetupfuncs or []:
        f(ui, obj)
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    # wire peers get their own initialization pass
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
165 165
def repository(ui, path='', create=False, presetupfuncs=None):
    """Return a visibility-filtered local repository object for *path*.

    Aborts when the path refers to a repository that is not local.
    """
    obj = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
    local = obj.local()
    if not local:
        raise error.Abort(_("repository '%s' is not local")
                          % (path or obj.url()))
    return local.filtered('visible')
174 174
def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    remui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(remui, path, create)
    return obj.peer()
179 179
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest('foo')
    'foo'
    >>> defaultdest('/foo/bar')
    'bar'
    >>> defaultdest('/')
    ''
    >>> defaultdest('')
    ''
    >>> defaultdest('http://example.org/')
    ''
    >>> defaultdest('http://example.org/foo/')
    'foo'
    '''
    urlpath = util.url(source).path
    if not urlpath:
        return ''
    # normpath strips a trailing slash so basename yields the last component
    return os.path.basename(os.path.normpath(urlpath))
200 200
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository

    The repository created at ``dest`` has no store of its own; it points
    at the store of ``source`` via its '.hg/sharedpath' file.

    ``update`` may be a boolean or a revision to check out afterwards.
    ``bookmarks`` controls whether bookmarks are shared with the source.
    ``defaultpath`` overrides the 'paths.default' written into the new
    repository's hgrc.  When ``relative`` is true, the pointer to the store
    is recorded as a relative path so the pair can be moved together.
    '''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = vfsmod.vfs(dest, realpath=True)
    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        # a missing requires file means an old format-0 repository
        if inst.errno != errno.ENOENT:
            raise

    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except (IOError, ValueError) as e:
            # ValueError is raised on Windows if the drive letters differ on
            # each path; abort cleanly instead of crashing with a traceback
            raise error.Abort(_('cannot calculate relative path'),
                              hint=str(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
258 258
def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Configure a freshly created shared repository.

    At this point the new repo only has a requirements file and a pointer
    to the source store; this writes the additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        hgrc = destrepo.vfs("hgrc", "w", text=True)
        hgrc.write("[paths]\n")
        hgrc.write("default = %s\n" % default)
        hgrc.close()

    with destrepo.wlock():
        if not bookmarks:
            return
        # record that bookmarks are shared with the source repository
        sharedfile = destrepo.vfs('shared', 'w')
        sharedfile.write(sharedbookmarks + '\n')
        sharedfile.close()
280 280
def _postshareupdate(repo, update, checkout=None):
    """Maybe update the working directory after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then fall back to default/tip
    for candidate in (checkout, 'default', 'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
301 301
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    # the destination lock (if taken) is returned to the caller, who is
    # responsible for releasing it once the clone is fully set up
    destlock = None
    try:
        hardlink = None
        num = 0
        closetopic = [None]
        def prog(topic, pos):
            # progress callback for util.copyfiles; remember the topic so
            # we can close it out after all files are copied
            if pos is None:
                closetopic[0] = topic
            else:
                ui.progress(topic, pos + num)
        srcpublishing = srcrepo.publishing()
        srcvfs = vfsmod.vfs(srcrepo.sharedpath)
        dstvfs = vfsmod.vfs(destpath)
        for f in srcrepo.store.copylist():
            # a publishing repo has all-public phases; the clone can
            # recompute that, so skip copying phaseroots
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink, progress=prog)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        else:
            ui.debug("copied %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        return destlock
    except: # re-raises
        # bare except on purpose: always release the destination lock
        # before propagating any failure (including KeyboardInterrupt)
        release(destlock)
        raise
347 347
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                               "revision lookup and so doesn't "
                               "support clone by revision"))
        revs = [srcpeer.lookup(r) for r in rev]

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  rev=rev, update=False, stream=stream)

        # Resolve the value to put in [paths] section for the source.
        if islocal(source):
            defaultpath = os.path.abspath(util.urllocalpath(source))
        else:
            defaultpath = source

        sharerepo = repository(ui, path=sharepath)
        share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
              defaultpath=defaultpath)

        # We need to perform a pull against the dest repo to fetch bookmarks
        # and other non-store data that isn't shared by default. In the case of
        # non-existing shared repo, this means we pull from the remote twice. This
        # is a bit weird. But at the time it was implemented, there wasn't an easy
        # way to pull just non-changegroup data.
        destrepo = repository(ui, path=dest)
        exchange.pull(destrepo, srcpeer, heads=revs)

        _postshareupdate(destrepo, update)

        return srcpeer, peer(ui, peeropts, dest)
411 411
# Recomputing caches is expensive on big repositories, so a local clone
# simply copies the cache files over.
def _copycache(srcrepo, dstcachedir, fname):
    """Copy cache file *fname* from srcrepo into *dstcachedir*, if present."""
    src = srcrepo.vfs.join('cache/%s' % fname)
    if not os.path.exists(src):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
422 422
def _cachetocopy(srcrepo):
    """return the list of cache file valuable to copy during a clone"""
    # In local clones we're copying all nodes, not just served
    # ones. Therefore copy all branch caches over.
    cachefiles = ['branch2']
    cachefiles.extend('branch2-%s' % f for f in repoview.filtertable)
    cachefiles.extend(['rbc-names-v1', 'rbc-revs-v1'])
    cachefiles.append('tags2')
    cachefiles.extend('tags2-%s' % f for f in repoview.filtertable)
    cachefiles.append('hgtagsfnodes1')
    return cachefiles
431 434
def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
          update=True, stream=False, branch=None, shareopts=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."
    """

    # normalize 'source' to a URL and obtain a peer for it
    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branch = (None, branch or [])
        origsource = source = srcpeer.url()
    rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    # pooled storage: delegate to clonewithshare() when a share pool is
    # configured and a share name can be resolved
    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                rootnode = srcpeer.lookup('0')
                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, hashlib.sha1(source).hexdigest())
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=rev, update=update,
                                  stream=stream)

    # regular clone: either a raw store copy (local, no secrets, no revs
    # requested) or a pull/push based clone
    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            # copy caches too, so the clone does not recompute them
            dstcachedir = os.path.join(destpath, 'cache')
            for cache in _cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                                # only pass ui when no srcrepo
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            revs = None
            if rev:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                       "revision lookup and so doesn't "
                                       "support clone by revision"))
                revs = [srcpeer.lookup(r) for r in rev]
                checkout = revs[0]
            local = destpeer.local()
            if local:
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream)
            elif srcrepo:
                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                  )

        # clone succeeded: do not remove the destination on the way out
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            # record the source as 'paths.default' in the new repo's hgrc
            template = uimod.samplehgrcs['cloned']
            fp = destrepo.vfs("hgrc", "w", text=True)
            u = util.url(abspath)
            u.passwd = None
            defaulturl = str(u)
            fp.write(template % defaulturl)
            fp.close()

            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    # no usable checkout rev: prefer the '@' bookmark,
                    # then the tip of the default branch, then tip
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            # clean up a partially created repository
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
693 696
def _showstats(repo, stats, quietempty=False):
    """Print the update statistics tuple unless it is all-zero and
    quietempty is set."""
    if quietempty and not any(stats):
        return
    msg = _("%d files updated, %d files merged, "
            "%d files removed, %d files unresolved\n")
    repo.ui.status(msg % stats)
699 702
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    Changes are clobbered when overwrite is set, and merged otherwise.

    Returns stats (see pydoc mercurial.merge.applyupdates)."""
    labels = ['working copy', 'destination']
    return mergemod.update(repo, node, False, overwrite,
                           labels=labels, updatecheck=updatecheck)
709 712
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats[3]
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
717 720
# naming conflict in clone(): 'update' is also a keyword argument there,
# so internal callers use this unambiguous alias
_update = update
720 723
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    updatestats = updaterepo(repo, node, True)
    # drop any leftover graft state file
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, updatestats, quietempty)
    return updatestats[3] > 0
728 731
# naming conflict in updatetotally(): 'clean' is also a parameter name there,
# so internal callers use this unambiguous alias
_clean = clean
731 734
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

     * abort: abort if the working directory is dirty
     * none: don't check (merge working directory changes into destination)
     * linear: check that update is linear before merging working directory
               changes into destination
     * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('experimental', 'updatecheck')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and tell us
            # whether the active bookmark should move)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                # bail out now; the actual update then needs no re-check
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is a non-bookmark name: deactivate any bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
803 806
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    mergestats = mergemod.update(repo, node, True, force,
                                 mergeforce=mergeforce, labels=labels)
    _showstats(repo, mergestats)
    unresolved = mergestats[3]
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C .' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return unresolved > 0
816 819
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, else the result of
    subreporecurse().
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # fetch the incoming changes as a temporary bundle repo; cleanupfn
    # removes the bundle afterwards
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = cmdutil.show_changeset(ui, other, opts, buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
846 849
def incoming(ui, repo, source, opts):
    """Show changesets present in *source* but not in *repo*."""
    def subreporecurse():
        # recurse into subrepositories; 1 means "nothing incoming found"
        ret = 1
        if opts.get('subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                ret = min(ret, wctx.sub(subpath).incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        shown = 0
        skipmerges = opts.get('no_merges')
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)
871 874
def _outgoing(ui, repo, dest, opts):
    """Return (missing changesets, remote peer) for an outgoing check."""
    path = ui.expandpath(dest or 'default-push', dest or 'default')
    path, branches = parseurl(path, opts.get('branch'))
    ui.status(_('comparing with %s\n') % util.hidepassword(path))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, path)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    missing = outgoing.missing
    if not missing:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return missing, other
887 890
def outgoing(ui, repo, dest, opts):
    """Show changesets in repo not present in dest.

    Returns 0 when outgoing changes were found, else the subrepo
    recursion result.
    """
    def recurse():
        # recurse into subrepositories; 1 means "nothing outgoing found"
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        # skip merge changesets when --no-merges was requested
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes
921 924
def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
    if not revs:
        return ret

    repo.ui.status(_('checking subrepo links\n'))
    for rev in revs:
        ctx = repo[rev]
        try:
            for subpath in ctx.substate:
                try:
                    # Keep any earlier failure (truthy ret) if this
                    # subrepo verifies cleanly.
                    ret = (ctx.sub(subpath, allowcreate=False).verify()
                           or ret)
                except error.RepoError as e:
                    repo.ui.warn(('%s: %s\n') % (rev, e))
        except Exception:
            # Reading .hgsubstate itself blew up; report and move on.
            repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                         node.short(ctx.node()))

    return ret
950 953
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # ssh options: command-line values win over the source config
    for key in ('ssh', 'remotecmd'):
        val = opts.get(key) or src.config('ui', key)
        if val:
            dst.setconfig("ui", key, val, 'copied')

    # bundle-specific option
    root = src.config('bundle', 'mainreporoot')
    if root:
        dst.setconfig('bundle', 'mainreporoot', root, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    cacerts = src.config('web', 'cacerts')
    if cacerts:
        dst.setconfig('web', 'cacerts', util.expandpath(cacerts), 'copied')

    return dst
979 982
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute naming a base directory,
# filename relative to that directory); see cachedlocalrepo._repostate.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
       ]
988 991
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # _state is a tuple of (mtime, size) pairs for the files of
        # interest (``foi``); mtime is the newest of those mtimes.
        # Note: ``mtime`` is deliberately public (no underscore) — it is
        # read/written by copy() and may be used by callers.
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            # Nothing observable changed; reuse the cached instance.
            return self._repo, False

        # Rebuild from scratch, preserving the original filter (view).
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (state tuple, newest mtime) for the files of interest."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # File may not exist yet (e.g. no bookmarks); fall back to
                # the containing directory so we still get a usable stamp.
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # Carry over the cached validation state so the copy does not
        # immediately consider itself stale.
        c._state = self._state
        c.mtime = self.mtime
        return c
@@ -1,726 +1,734 b''
1 1 setup
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [extensions]
5 5 > blackbox=
6 6 > mock=$TESTDIR/mockblackbox.py
7 7 > EOF
8 8
9 9 Helper functions:
10 10
11 11 $ cacheexists() {
12 12 > [ -f .hg/cache/tags2-visible ] && echo "tag cache exists" || echo "no tag cache"
13 13 > }
14 14
15 15 $ fnodescacheexists() {
16 16 > [ -f .hg/cache/hgtagsfnodes1 ] && echo "fnodes cache exists" || echo "no fnodes cache"
17 17 > }
18 18
19 19 $ dumptags() {
20 20 > rev=$1
21 21 > echo "rev $rev: .hgtags:"
22 22 > hg cat -r$rev .hgtags
23 23 > }
24 24
25 25 # XXX need to test that the tag cache works when we strip an old head
26 26 # and add a new one rooted off non-tip: i.e. node and rev of tip are the
27 27 # same, but stuff has changed behind tip.
28 28
29 29 Setup:
30 30
31 31 $ hg init t
32 32 $ cd t
33 33 $ cacheexists
34 34 no tag cache
35 35 $ fnodescacheexists
36 36 no fnodes cache
37 37 $ hg id
38 38 000000000000 tip
39 39 $ cacheexists
40 40 no tag cache
41 41 $ fnodescacheexists
42 42 no fnodes cache
43 43 $ echo a > a
44 44 $ hg add a
45 45 $ hg commit -m "test"
46 46 $ hg co
47 47 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
48 48 $ hg identify
49 49 acb14030fe0a tip
50 50 $ hg identify -r 'wdir()'
51 51 acb14030fe0a tip
52 52 $ cacheexists
53 53 tag cache exists
54 54 No fnodes cache because .hgtags file doesn't exist
55 55 (this is an implementation detail)
56 56 $ fnodescacheexists
57 57 no fnodes cache
58 58
59 59 Try corrupting the cache
60 60
61 61 $ printf 'a b' > .hg/cache/tags2-visible
62 62 $ hg identify
63 63 acb14030fe0a tip
64 64 $ cacheexists
65 65 tag cache exists
66 66 $ fnodescacheexists
67 67 no fnodes cache
68 68 $ hg identify
69 69 acb14030fe0a tip
70 70
71 71 Create local tag with long name:
72 72
73 73 $ T=`hg identify --debug --id`
74 74 $ hg tag -l "This is a local tag with a really long name!"
75 75 $ hg tags
76 76 tip 0:acb14030fe0a
77 77 This is a local tag with a really long name! 0:acb14030fe0a
78 78 $ rm .hg/localtags
79 79
80 80 Create a tag behind hg's back:
81 81
82 82 $ echo "$T first" > .hgtags
83 83 $ cat .hgtags
84 84 acb14030fe0a21b60322c440ad2d20cf7685a376 first
85 85 $ hg add .hgtags
86 86 $ hg commit -m "add tags"
87 87 $ hg tags
88 88 tip 1:b9154636be93
89 89 first 0:acb14030fe0a
90 90 $ hg identify
91 91 b9154636be93 tip
92 92
93 93 We should have a fnodes cache now that we have a real tag
94 94 The cache should have an empty entry for rev 0 and a valid entry for rev 1.
95 95
96 96
97 97 $ fnodescacheexists
98 98 fnodes cache exists
99 99 $ f --size --hexdump .hg/cache/hgtagsfnodes1
100 100 .hg/cache/hgtagsfnodes1: size=48
101 101 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
102 102 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
103 103 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
104 104
105 105 Repeat with cold tag cache:
106 106
107 107 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
108 108 $ hg identify
109 109 b9154636be93 tip
110 110
111 111 $ fnodescacheexists
112 112 fnodes cache exists
113 113 $ f --size --hexdump .hg/cache/hgtagsfnodes1
114 114 .hg/cache/hgtagsfnodes1: size=48
115 115 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
116 116 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
117 117 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
118 118
119 119 And again, but now unable to write tag cache or lock file:
120 120
121 121 #if unix-permissions
122 122 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
123 123 $ chmod 555 .hg/cache
124 124 $ hg identify
125 125 b9154636be93 tip
126 126 $ chmod 755 .hg/cache
127 127
128 128 $ chmod 555 .hg
129 129 $ hg identify
130 130 b9154636be93 tip
131 131 $ chmod 755 .hg
132 132 #endif
133 133
134 134 Tag cache debug info written to blackbox log
135 135
136 136 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
137 137 $ hg identify
138 138 b9154636be93 tip
139 139 $ hg blackbox -l 6
140 140 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
141 141 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing 48 bytes to cache/hgtagsfnodes1
142 142 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/1 cache hits/lookups in * seconds (glob)
143 143 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
144 144 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
145 145 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
146 146
147 147 Failure to acquire lock results in no write
148 148
149 149 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
150 150 $ echo 'foo:1' > .hg/wlock
151 151 $ hg identify
152 152 b9154636be93 tip
153 153 $ hg blackbox -l 6
154 154 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
155 155 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> not writing .hg/cache/hgtagsfnodes1 because lock cannot be acquired
156 156 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/1 cache hits/lookups in * seconds (glob)
157 157 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
158 158 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
159 159 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
160 160
161 161 $ fnodescacheexists
162 162 no fnodes cache
163 163
164 164 $ rm .hg/wlock
165 165
166 166 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
167 167 $ hg identify
168 168 b9154636be93 tip
169 169
170 170 Create a branch:
171 171
172 172 $ echo bb > a
173 173 $ hg status
174 174 M a
175 175 $ hg identify
176 176 b9154636be93+ tip
177 177 $ hg co first
178 178 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
179 179 $ hg id
180 180 acb14030fe0a+ first
181 181 $ hg id -r 'wdir()'
182 182 acb14030fe0a+ first
183 183 $ hg -v id
184 184 acb14030fe0a+ first
185 185 $ hg status
186 186 M a
187 187 $ echo 1 > b
188 188 $ hg add b
189 189 $ hg commit -m "branch"
190 190 created new head
191 191
192 192 Creating a new commit shouldn't append the .hgtags fnodes cache until
193 193 tags info is accessed
194 194
195 195 $ f --size --hexdump .hg/cache/hgtagsfnodes1
196 196 .hg/cache/hgtagsfnodes1: size=48
197 197 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
198 198 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
199 199 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
200 200
201 201 $ hg id
202 202 c8edf04160c7 tip
203 203
204 204 First 4 bytes of record 3 are changeset fragment
205 205
206 206 $ f --size --hexdump .hg/cache/hgtagsfnodes1
207 207 .hg/cache/hgtagsfnodes1: size=72
208 208 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
209 209 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
210 210 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
211 211 0030: c8 ed f0 41 00 00 00 00 00 00 00 00 00 00 00 00 |...A............|
212 212 0040: 00 00 00 00 00 00 00 00 |........|
213 213
214 214 Merge the two heads:
215 215
216 216 $ hg merge 1
217 217 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
218 218 (branch merge, don't forget to commit)
219 219 $ hg blackbox -l3
220 220 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28 (5000)> merge 1
221 221 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> merge 1 exited 0 after * seconds (glob)
222 222 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l3
223 223 $ hg id
224 224 c8edf04160c7+b9154636be93+ tip
225 225 $ hg status
226 226 M .hgtags
227 227 $ hg commit -m "merge"
228 228
229 229 Create a fake head, make sure tag not visible afterwards:
230 230
231 231 $ cp .hgtags tags
232 232 $ hg tag last
233 233 $ hg rm .hgtags
234 234 $ hg commit -m "remove"
235 235
236 236 $ mv tags .hgtags
237 237 $ hg add .hgtags
238 238 $ hg commit -m "readd"
239 239 $
240 240 $ hg tags
241 241 tip 6:35ff301afafe
242 242 first 0:acb14030fe0a
243 243
244 244 Add invalid tags:
245 245
246 246 $ echo "spam" >> .hgtags
247 247 $ echo >> .hgtags
248 248 $ echo "foo bar" >> .hgtags
249 249 $ echo "a5a5 invalid" >> .hg/localtags
250 250 $ cat .hgtags
251 251 acb14030fe0a21b60322c440ad2d20cf7685a376 first
252 252 spam
253 253
254 254 foo bar
255 255 $ hg commit -m "tags"
256 256
257 257 Report tag parse error on other head:
258 258
259 259 $ hg up 3
260 260 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
261 261 $ echo 'x y' >> .hgtags
262 262 $ hg commit -m "head"
263 263 created new head
264 264
265 265 $ hg tags --debug
266 266 .hgtags@75d9f02dfe28, line 2: cannot parse entry
267 267 .hgtags@75d9f02dfe28, line 4: node 'foo' is not well formed
268 268 .hgtags@c4be69a18c11, line 2: node 'x' is not well formed
269 269 tip 8:c4be69a18c11e8bc3a5fdbb576017c25f7d84663
270 270 first 0:acb14030fe0a21b60322c440ad2d20cf7685a376
271 271 $ hg tip
272 272 changeset: 8:c4be69a18c11
273 273 tag: tip
274 274 parent: 3:ac5e980c4dc0
275 275 user: test
276 276 date: Thu Jan 01 00:00:00 1970 +0000
277 277 summary: head
278 278
279 279
280 280 Test tag precedence rules:
281 281
282 282 $ cd ..
283 283 $ hg init t2
284 284 $ cd t2
285 285 $ echo foo > foo
286 286 $ hg add foo
287 287 $ hg ci -m 'add foo' # rev 0
288 288 $ hg tag bar # rev 1
289 289 $ echo >> foo
290 290 $ hg ci -m 'change foo 1' # rev 2
291 291 $ hg up -C 1
292 292 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
293 293 $ hg tag -r 1 -f bar # rev 3
294 294 $ hg up -C 1
295 295 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
296 296 $ echo >> foo
297 297 $ hg ci -m 'change foo 2' # rev 4
298 298 created new head
299 299 $ hg tags
300 300 tip 4:0c192d7d5e6b
301 301 bar 1:78391a272241
302 302
303 303 Repeat in case of cache effects:
304 304
305 305 $ hg tags
306 306 tip 4:0c192d7d5e6b
307 307 bar 1:78391a272241
308 308
309 309 Detailed dump of tag info:
310 310
311 311 $ hg heads -q # expect 4, 3, 2
312 312 4:0c192d7d5e6b
313 313 3:6fa450212aeb
314 314 2:7a94127795a3
315 315 $ dumptags 2
316 316 rev 2: .hgtags:
317 317 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
318 318 $ dumptags 3
319 319 rev 3: .hgtags:
320 320 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
321 321 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
322 322 78391a272241d70354aa14c874552cad6b51bb42 bar
323 323 $ dumptags 4
324 324 rev 4: .hgtags:
325 325 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
326 326
327 327 Dump cache:
328 328
329 329 $ cat .hg/cache/tags2-visible
330 330 4 0c192d7d5e6b78a714de54a2e9627952a877e25a
331 331 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
332 332 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
333 333 78391a272241d70354aa14c874552cad6b51bb42 bar
334 334
335 335 $ f --size --hexdump .hg/cache/hgtagsfnodes1
336 336 .hg/cache/hgtagsfnodes1: size=120
337 337 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
338 338 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
339 339 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
340 340 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
341 341 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
342 342 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
343 343 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
344 344 0070: 78 ee 5a 2d ad bc 94 3d |x.Z-...=|
345 345
346 346 Corrupt the .hgtags fnodes cache
347 347 Extra junk data at the end should get overwritten on next cache update
348 348
349 349 $ echo extra >> .hg/cache/hgtagsfnodes1
350 350 $ echo dummy1 > foo
351 351 $ hg commit -m throwaway1
352 352
353 353 $ hg tags
354 354 tip 5:8dbfe60eff30
355 355 bar 1:78391a272241
356 356
357 357 $ hg blackbox -l 6
358 358 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags
359 359 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing 24 bytes to cache/hgtagsfnodes1
360 360 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> 2/3 cache hits/lookups in * seconds (glob)
361 361 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing .hg/cache/tags2-visible with 1 tags
362 362 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob)
363 363 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6
364 364
365 365 #if unix-permissions no-root
366 366 Errors writing to .hgtags fnodes cache are silently ignored
367 367
368 368 $ echo dummy2 > foo
369 369 $ hg commit -m throwaway2
370 370
371 371 $ chmod a-w .hg/cache/hgtagsfnodes1
372 372 $ rm -f .hg/cache/tags2-visible
373 373
374 374 $ hg tags
375 375 tip 6:b968051b5cf3
376 376 bar 1:78391a272241
377 377
378 378 $ hg blackbox -l 6
379 379 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
380 380 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno 13] Permission denied: '$TESTTMP/t2/.hg/cache/hgtagsfnodes1'
381 381 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/3 cache hits/lookups in * seconds (glob)
382 382 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
383 383 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
384 384 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
385 385
386 386 $ chmod a+w .hg/cache/hgtagsfnodes1
387 387
388 388 $ rm -f .hg/cache/tags2-visible
389 389 $ hg tags
390 390 tip 6:b968051b5cf3
391 391 bar 1:78391a272241
392 392
393 393 $ hg blackbox -l 6
394 394 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
395 395 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1
396 396 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/3 cache hits/lookups in * seconds (glob)
397 397 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
398 398 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
399 399 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
400 400
401 401 $ f --size .hg/cache/hgtagsfnodes1
402 402 .hg/cache/hgtagsfnodes1: size=168
403 403
404 404 $ hg -q --config extensions.strip= strip -r 6 --no-backup
405 405 #endif
406 406
407 407 Stripping doesn't truncate the tags cache until new data is available
408 408
409 409 $ rm -f .hg/cache/hgtagsfnodes1 .hg/cache/tags2-visible
410 410 $ hg tags
411 411 tip 5:8dbfe60eff30
412 412 bar 1:78391a272241
413 413
414 414 $ f --size .hg/cache/hgtagsfnodes1
415 415 .hg/cache/hgtagsfnodes1: size=144
416 416
417 417 $ hg -q --config extensions.strip= strip -r 5 --no-backup
418 418 $ hg tags
419 419 tip 4:0c192d7d5e6b
420 420 bar 1:78391a272241
421 421
422 422 $ hg blackbox -l 5
423 423 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing 24 bytes to cache/hgtagsfnodes1
424 424 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> 2/3 cache hits/lookups in * seconds (glob)
425 425 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing .hg/cache/tags2-visible with 1 tags
426 426 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> tags exited 0 after * seconds (glob)
427 427 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> blackbox -l 5
428 428
429 429 $ f --size .hg/cache/hgtagsfnodes1
430 430 .hg/cache/hgtagsfnodes1: size=120
431 431
432 432 $ echo dummy > foo
433 433 $ hg commit -m throwaway3
434 434
435 435 $ hg tags
436 436 tip 5:035f65efb448
437 437 bar 1:78391a272241
438 438
439 439 $ hg blackbox -l 6
440 440 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags
441 441 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing 24 bytes to cache/hgtagsfnodes1
442 442 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> 2/3 cache hits/lookups in * seconds (glob)
443 443 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing .hg/cache/tags2-visible with 1 tags
444 444 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags exited 0 after * seconds (glob)
445 445 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> blackbox -l 6
446 446 $ f --size .hg/cache/hgtagsfnodes1
447 447 .hg/cache/hgtagsfnodes1: size=144
448 448
449 449 $ hg -q --config extensions.strip= strip -r 5 --no-backup
450 450
451 451 Test tag removal:
452 452
453 453 $ hg tag --remove bar # rev 5
454 454 $ hg tip -vp
455 455 changeset: 5:5f6e8655b1c7
456 456 tag: tip
457 457 user: test
458 458 date: Thu Jan 01 00:00:00 1970 +0000
459 459 files: .hgtags
460 460 description:
461 461 Removed tag bar
462 462
463 463
464 464 diff -r 0c192d7d5e6b -r 5f6e8655b1c7 .hgtags
465 465 --- a/.hgtags Thu Jan 01 00:00:00 1970 +0000
466 466 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
467 467 @@ -1,1 +1,3 @@
468 468 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
469 469 +78391a272241d70354aa14c874552cad6b51bb42 bar
470 470 +0000000000000000000000000000000000000000 bar
471 471
472 472 $ hg tags
473 473 tip 5:5f6e8655b1c7
474 474 $ hg tags # again, try to expose cache bugs
475 475 tip 5:5f6e8655b1c7
476 476
477 477 Remove nonexistent tag:
478 478
479 479 $ hg tag --remove foobar
480 480 abort: tag 'foobar' does not exist
481 481 [255]
482 482 $ hg tip
483 483 changeset: 5:5f6e8655b1c7
484 484 tag: tip
485 485 user: test
486 486 date: Thu Jan 01 00:00:00 1970 +0000
487 487 summary: Removed tag bar
488 488
489 489
490 490 Undo a tag with rollback:
491 491
492 492 $ hg rollback # destroy rev 5 (restore bar)
493 493 repository tip rolled back to revision 4 (undo commit)
494 494 working directory now based on revision 4
495 495 $ hg tags
496 496 tip 4:0c192d7d5e6b
497 497 bar 1:78391a272241
498 498 $ hg tags
499 499 tip 4:0c192d7d5e6b
500 500 bar 1:78391a272241
501 501
502 502 Test tag rank:
503 503
504 504 $ cd ..
505 505 $ hg init t3
506 506 $ cd t3
507 507 $ echo foo > foo
508 508 $ hg add foo
509 509 $ hg ci -m 'add foo' # rev 0
510 510 $ hg tag -f bar # rev 1 bar -> 0
511 511 $ hg tag -f bar # rev 2 bar -> 1
512 512 $ hg tag -fr 0 bar # rev 3 bar -> 0
513 513 $ hg tag -fr 1 bar # rev 4 bar -> 1
514 514 $ hg tag -fr 0 bar # rev 5 bar -> 0
515 515 $ hg tags
516 516 tip 5:85f05169d91d
517 517 bar 0:bbd179dfa0a7
518 518 $ hg co 3
519 519 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
520 520 $ echo barbar > foo
521 521 $ hg ci -m 'change foo' # rev 6
522 522 created new head
523 523 $ hg tags
524 524 tip 6:735c3ca72986
525 525 bar 0:bbd179dfa0a7
526 526
527 527 Don't allow moving tag without -f:
528 528
529 529 $ hg tag -r 3 bar
530 530 abort: tag 'bar' already exists (use -f to force)
531 531 [255]
532 532 $ hg tags
533 533 tip 6:735c3ca72986
534 534 bar 0:bbd179dfa0a7
535 535
536 536 Strip 1: expose an old head:
537 537
538 538 $ hg --config extensions.mq= strip 5
539 539 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
540 540 $ hg tags # partly stale cache
541 541 tip 5:735c3ca72986
542 542 bar 1:78391a272241
543 543 $ hg tags # up-to-date cache
544 544 tip 5:735c3ca72986
545 545 bar 1:78391a272241
546 546
547 547 Strip 2: destroy whole branch, no old head exposed
548 548
549 549 $ hg --config extensions.mq= strip 4
550 550 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
551 551 $ hg tags # partly stale
552 552 tip 4:735c3ca72986
553 553 bar 0:bbd179dfa0a7
554 554 $ rm -f .hg/cache/tags2-visible
555 555 $ hg tags # cold cache
556 556 tip 4:735c3ca72986
557 557 bar 0:bbd179dfa0a7
558 558
559 559 Test tag rank with 3 heads:
560 560
561 561 $ cd ..
562 562 $ hg init t4
563 563 $ cd t4
564 564 $ echo foo > foo
565 565 $ hg add
566 566 adding foo
567 567 $ hg ci -m 'add foo' # rev 0
568 568 $ hg tag bar # rev 1 bar -> 0
569 569 $ hg tag -f bar # rev 2 bar -> 1
570 570 $ hg up -qC 0
571 571 $ hg tag -fr 2 bar # rev 3 bar -> 2
572 572 $ hg tags
573 573 tip 3:197c21bbbf2c
574 574 bar 2:6fa450212aeb
575 575 $ hg up -qC 0
576 576 $ hg tag -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
577 577
578 578 Bar should still point to rev 2:
579 579
580 580 $ hg tags
581 581 tip 4:3b4b14ed0202
582 582 bar 2:6fa450212aeb
583 583
584 584 Test that removing global/local tags does not get confused when trying
585 585 to remove a tag of type X which actually only exists as a type Y:
586 586
587 587 $ cd ..
588 588 $ hg init t5
589 589 $ cd t5
590 590 $ echo foo > foo
591 591 $ hg add
592 592 adding foo
593 593 $ hg ci -m 'add foo' # rev 0
594 594
595 595 $ hg tag -r 0 -l localtag
596 596 $ hg tag --remove localtag
597 597 abort: tag 'localtag' is not a global tag
598 598 [255]
599 599 $
600 600 $ hg tag -r 0 globaltag
601 601 $ hg tag --remove -l globaltag
602 602 abort: tag 'globaltag' is not a local tag
603 603 [255]
604 604 $ hg tags -v
605 605 tip 1:a0b6fe111088
606 606 localtag 0:bbd179dfa0a7 local
607 607 globaltag 0:bbd179dfa0a7
608 608
609 609 Test for issue3911
610 610
611 611 $ hg tag -r 0 -l localtag2
612 612 $ hg tag -l --remove localtag2
613 613 $ hg tags -v
614 614 tip 1:a0b6fe111088
615 615 localtag 0:bbd179dfa0a7 local
616 616 globaltag 0:bbd179dfa0a7
617 617
618 618 $ hg tag -r 1 -f localtag
619 619 $ hg tags -v
620 620 tip 2:5c70a037bb37
621 621 localtag 1:a0b6fe111088
622 622 globaltag 0:bbd179dfa0a7
623 623
624 624 $ hg tags -v
625 625 tip 2:5c70a037bb37
626 626 localtag 1:a0b6fe111088
627 627 globaltag 0:bbd179dfa0a7
628 628
629 629 $ hg tag -r 1 localtag2
630 630 $ hg tags -v
631 631 tip 3:bbfb8cd42be2
632 632 localtag2 1:a0b6fe111088
633 633 localtag 1:a0b6fe111088
634 634 globaltag 0:bbd179dfa0a7
635 635
636 636 $ hg tags -v
637 637 tip 3:bbfb8cd42be2
638 638 localtag2 1:a0b6fe111088
639 639 localtag 1:a0b6fe111088
640 640 globaltag 0:bbd179dfa0a7
641 641
642 642 $ cd ..
643 643
644 644 Create a repository with tags data to test .hgtags fnodes transfer
645 645
646 646 $ hg init tagsserver
647 647 $ cd tagsserver
648 648 $ touch foo
649 649 $ hg -q commit -A -m initial
650 650 $ hg tag -m 'tag 0.1' 0.1
651 651 $ echo second > foo
652 652 $ hg commit -m second
653 653 $ hg tag -m 'tag 0.2' 0.2
654 654 $ hg tags
655 655 tip 3:40f0358cb314
656 656 0.2 2:f63cc8fe54e4
657 657 0.1 0:96ee1d7354c4
658 658 $ cd ..
659 659
660 660 Cloning should pull down hgtags fnodes mappings and write the cache file
661 661
662 662 $ hg clone --pull tagsserver tagsclient
663 663 requesting all changes
664 664 adding changesets
665 665 adding manifests
666 666 adding file changes
667 667 added 4 changesets with 4 changes to 2 files
668 668 updating to branch default
669 669 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
670 670
671 671 Missing tags2* files means the cache wasn't written through the normal mechanism.
672 672
673 673 $ ls tagsclient/.hg/cache
674 674 branch2-base
675 675 checkisexec (execbit !)
676 676 checklink (symlink !)
677 677 checklink-target (symlink !)
678 678 hgtagsfnodes1
679 679
680 680 Cache should contain the head only, even though other nodes have tags data
681 681
682 682 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
683 683 tagsclient/.hg/cache/hgtagsfnodes1: size=96
684 684 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
685 685 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
686 686 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
687 687 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
688 688 0040: ff ff ff ff ff ff ff ff 40 f0 35 8c 19 e0 a7 d3 |........@.5.....|
689 689 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
690 690
691 691 Running hg tags should produce tags2* file and not change cache
692 692
693 693 $ hg -R tagsclient tags
694 694 tip 3:40f0358cb314
695 695 0.2 2:f63cc8fe54e4
696 696 0.1 0:96ee1d7354c4
697 697
698 698 $ ls tagsclient/.hg/cache
699 699 branch2-base
700 700 checkisexec (execbit !)
701 701 checklink (symlink !)
702 702 checklink-target (symlink !)
703 703 hgtagsfnodes1
704 704 tags2-visible
705 705
706 706 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
707 707 tagsclient/.hg/cache/hgtagsfnodes1: size=96
708 708 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
709 709 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
710 710 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
711 711 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
712 712 0040: ff ff ff ff ff ff ff ff 40 f0 35 8c 19 e0 a7 d3 |........@.5.....|
713 713 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
714 714
715 715 Check that the bundle includes cache data
716 716
717 717 $ hg -R tagsclient bundle --all ./test-cache-in-bundle-all-rev.hg
718 718 4 changesets found
719 719 $ hg debugbundle ./test-cache-in-bundle-all-rev.hg
720 720 Stream params: sortdict([('Compression', 'BZ')])
721 721 changegroup -- "sortdict([('version', '02'), ('nbchanges', '4')])"
722 722 96ee1d7354c4ad7372047672c36a1f561e3a6a4c
723 723 c4dab0c2fd337eb9191f80c3024830a4889a8f34
724 724 f63cc8fe54e4d326f8d692805d70e092f851ddb1
725 725 40f0358cb314c824a5929ee527308d90e023bc10
726 726 hgtagsfnodes -- 'sortdict()'
727
728 Check that local clone includes cache data
729
730 $ hg clone tagsclient tags-local-clone
731 updating to branch default
732 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
733 $ (cd tags-local-clone/.hg/cache/; ls -1 tag*)
734 tags2-visible
General Comments 0
You need to be logged in to leave comments. Login now