local-clone: also copy tags related caches...
marmoute - r32495:a25cc3ca default
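
The change extends the list returned by _cachetocopy() so that a local clone copies the tag caches (tags2, the per-filter tags2-* variants, and hgtagsfnodes1) alongside the branch caches. A minimal standalone sketch of the resulting list is below; the filter names are assumptions for illustration only, since the real code derives them from repoview.filtertable at runtime:

# Minimal sketch (not Mercurial's actual code) of what _cachetocopy()
# returns after this change. The filter names are assumed for
# illustration; Mercurial iterates repoview.filtertable instead.
ASSUMED_FILTERS = ['visible', 'served', 'immutable', 'base']

def cachetocopy_sketch(filters=ASSUMED_FILTERS):
    cachefiles = ['branch2']
    cachefiles += ['branch2-%s' % f for f in filters]
    cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
    # new in this changeset: tag caches travel with a local clone too
    cachefiles += ['tags2']
    cachefiles += ['tags2-%s' % f for f in filters]
    cachefiles += ['hgtagsfnodes1']
    return cachefiles

print(cachetocopy_sketch())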
@@ -1,1061 +1,1064 @@
 # hg.py - repository classes for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import hashlib
 import os
 import shutil

 from .i18n import _
 from .node import nullid

 from . import (
     bookmarks,
     bundlerepo,
     cmdutil,
     destutil,
     discovery,
     error,
     exchange,
     extensions,
     httppeer,
     localrepo,
     lock,
     merge as mergemod,
     node,
     phases,
     repoview,
     scmutil,
     sshpeer,
     statichttprepo,
     ui as uimod,
     unionrepo,
     url,
     util,
     verify as verifymod,
     vfs as vfsmod,
 )

 release = lock.release

 # shared features
 sharedbookmarks = 'bookmarks'

 def _local(path):
     path = util.expandpath(util.urllocalpath(path))
     return (os.path.isfile(path) and bundlerepo or localrepo)

 def addbranchrevs(lrepo, other, branches, revs):
     peer = other.peer() # a courtesy to callers using a localrepo for other
     hashbranch, branches = branches
     if not hashbranch and not branches:
         x = revs or None
         if util.safehasattr(revs, 'first'):
             y = revs.first()
         elif revs:
             y = revs[0]
         else:
             y = None
         return x, y
     if revs:
         revs = list(revs)
     else:
         revs = []

     if not peer.capable('branchmap'):
         if branches:
             raise error.Abort(_("remote branch lookup not supported"))
         revs.append(hashbranch)
         return revs, revs[0]
     branchmap = peer.branchmap()

     def primary(branch):
         if branch == '.':
             if not lrepo:
                 raise error.Abort(_("dirstate branch not accessible"))
             branch = lrepo.dirstate.branch()
         if branch in branchmap:
             revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
             return True
         else:
             return False

     for branch in branches:
         if not primary(branch):
             raise error.RepoLookupError(_("unknown branch '%s'") % branch)
     if hashbranch:
         if not primary(hashbranch):
             revs.append(hashbranch)
     return revs, revs[0]

 def parseurl(path, branches=None):
     '''parse url#branch, returning (url, (branch, branches))'''

     u = util.url(path)
     branch = None
     if u.fragment:
         branch = u.fragment
         u.fragment = None
     return bytes(u), (branch, branches or [])

 schemes = {
     'bundle': bundlerepo,
     'union': unionrepo,
     'file': _local,
     'http': httppeer,
     'https': httppeer,
     'ssh': sshpeer,
     'static-http': statichttprepo,
 }

 def _peerlookup(path):
     u = util.url(path)
     scheme = u.scheme or 'file'
     thing = schemes.get(scheme) or schemes['file']
     try:
         return thing(path)
     except TypeError:
         # we can't test callable(thing) because 'thing' can be an unloaded
         # module that implements __call__
         if not util.safehasattr(thing, 'instance'):
             raise
         return thing

 def islocal(repo):
     '''return true if repo (or path pointing to repo) is local'''
     if isinstance(repo, str):
         try:
             return _peerlookup(repo).islocal(repo)
         except AttributeError:
             return False
     return repo.local()

 def openpath(ui, path):
     '''open path with open if local, url.open if remote'''
     pathurl = util.url(path, parsequery=False, parsefragment=False)
     if pathurl.islocal():
         return util.posixfile(pathurl.localpath(), 'rb')
     else:
         return url.open(ui, path)

 # a list of (ui, repo) functions called for wire peer initialization
 wirepeersetupfuncs = []

 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
     """return a repository object for the specified path"""
     obj = _peerlookup(path).instance(ui, path, create)
     ui = getattr(obj, "ui", ui)
     for f in presetupfuncs or []:
         f(ui, obj)
     for name, module in extensions.extensions(ui):
         hook = getattr(module, 'reposetup', None)
         if hook:
             hook(ui, obj)
     if not obj.local():
         for f in wirepeersetupfuncs:
             f(ui, obj)
     return obj

 def repository(ui, path='', create=False, presetupfuncs=None):
     """return a repository object for the specified path"""
     peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
     repo = peer.local()
     if not repo:
         raise error.Abort(_("repository '%s' is not local") %
                           (path or peer.url()))
     return repo.filtered('visible')

 def peer(uiorrepo, opts, path, create=False):
     '''return a repository peer for the specified path'''
     rui = remoteui(uiorrepo, opts)
     return _peerorrepo(rui, path, create).peer()

 def defaultdest(source):
     '''return default destination of clone if none is given

     >>> defaultdest('foo')
     'foo'
     >>> defaultdest('/foo/bar')
     'bar'
     >>> defaultdest('/')
     ''
     >>> defaultdest('')
     ''
     >>> defaultdest('http://example.org/')
     ''
     >>> defaultdest('http://example.org/foo/')
     'foo'
     '''
     path = util.url(source).path
     if not path:
         return ''
     return os.path.basename(os.path.normpath(path))

 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
           relative=False):
     '''create a shared repository'''

     if not islocal(source):
         raise error.Abort(_('can only share local repositories'))

     if not dest:
         dest = defaultdest(source)
     else:
         dest = ui.expandpath(dest)

     if isinstance(source, str):
         origsource = ui.expandpath(source)
         source, branches = parseurl(origsource)
         srcrepo = repository(ui, source)
         rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
     else:
         srcrepo = source.local()
         origsource = source = srcrepo.url()
         checkout = None

     sharedpath = srcrepo.sharedpath # if our source is already sharing

     destwvfs = vfsmod.vfs(dest, realpath=True)
     destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

     if destvfs.lexists():
         raise error.Abort(_('destination already exists'))

     if not destwvfs.isdir():
         destwvfs.mkdir()
     destvfs.makedir()

     requirements = ''
     try:
         requirements = srcrepo.vfs.read('requires')
     except IOError as inst:
         if inst.errno != errno.ENOENT:
             raise

     if relative:
         try:
             sharedpath = os.path.relpath(sharedpath, destvfs.base)
             requirements += 'relshared\n'
         except IOError as e:
             raise error.Abort(_('cannot calculate relative path'),
                               hint=str(e))
     else:
         requirements += 'shared\n'

     destvfs.write('requires', requirements)
     destvfs.write('sharedpath', sharedpath)

     r = repository(ui, destwvfs.base)
     postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
     _postshareupdate(r, update, checkout=checkout)

 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
     """Called after a new shared repo is created.

     The new repo only has a requirements file and pointer to the source.
     This function configures additional shared data.

     Extensions can wrap this function and write additional entries to
     destrepo/.hg/shared to indicate additional pieces of data to be shared.
     """
     default = defaultpath or sourcerepo.ui.config('paths', 'default')
     if default:
         fp = destrepo.vfs("hgrc", "w", text=True)
         fp.write("[paths]\n")
         fp.write("default = %s\n" % default)
         fp.close()

     with destrepo.wlock():
         if bookmarks:
             fp = destrepo.vfs('shared', 'w')
             fp.write(sharedbookmarks + '\n')
             fp.close()

 def _postshareupdate(repo, update, checkout=None):
     """Maybe perform a working directory update after a shared repo is created.

     ``update`` can be a boolean or a revision to update to.
     """
     if not update:
         return

     repo.ui.status(_("updating working directory\n"))
     if update is not True:
         checkout = update
     for test in (checkout, 'default', 'tip'):
         if test is None:
             continue
         try:
             uprev = repo.lookup(test)
             break
         except error.RepoLookupError:
             continue
     _update(repo, uprev)

 def copystore(ui, srcrepo, destpath):
     '''copy files from store of srcrepo in destpath

     returns destlock
     '''
     destlock = None
     try:
         hardlink = None
         num = 0
         closetopic = [None]
         def prog(topic, pos):
             if pos is None:
                 closetopic[0] = topic
             else:
                 ui.progress(topic, pos + num)
         srcpublishing = srcrepo.publishing()
         srcvfs = vfsmod.vfs(srcrepo.sharedpath)
         dstvfs = vfsmod.vfs(destpath)
         for f in srcrepo.store.copylist():
             if srcpublishing and f.endswith('phaseroots'):
                 continue
             dstbase = os.path.dirname(f)
             if dstbase and not dstvfs.exists(dstbase):
                 dstvfs.mkdir(dstbase)
             if srcvfs.exists(f):
                 if f.endswith('data'):
                     # 'dstbase' may be empty (e.g. revlog format 0)
                     lockfile = os.path.join(dstbase, "lock")
                     # lock to avoid premature writing to the target
                     destlock = lock.lock(dstvfs, lockfile)
                 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                              hardlink, progress=prog)
                 num += n
         if hardlink:
             ui.debug("linked %d files\n" % num)
             if closetopic[0]:
                 ui.progress(closetopic[0], None)
         else:
             ui.debug("copied %d files\n" % num)
             if closetopic[0]:
                 ui.progress(closetopic[0], None)
         return destlock
     except: # re-raises
         release(destlock)
         raise

 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                    rev=None, update=True, stream=False):
     """Perform a clone using a shared repo.

     The store for the repository will be located at <sharepath>/.hg. The
     specified revisions will be cloned or pulled from "source". A shared repo
     will be created at "dest" and a working copy will be created if "update" is
     True.
     """
     revs = None
     if rev:
         if not srcpeer.capable('lookup'):
             raise error.Abort(_("src repository does not support "
                                 "revision lookup and so doesn't "
                                 "support clone by revision"))
         revs = [srcpeer.lookup(r) for r in rev]

     # Obtain a lock before checking for or cloning the pooled repo otherwise
     # 2 clients may race creating or populating it.
     pooldir = os.path.dirname(sharepath)
     # lock class requires the directory to exist.
     try:
         util.makedir(pooldir, False)
     except OSError as e:
         if e.errno != errno.EEXIST:
             raise

     poolvfs = vfsmod.vfs(pooldir)
     basename = os.path.basename(sharepath)

     with lock.lock(poolvfs, '%s.lock' % basename):
         if os.path.exists(sharepath):
             ui.status(_('(sharing from existing pooled repository %s)\n') %
                       basename)
         else:
             ui.status(_('(sharing from new pooled repository %s)\n') % basename)
             # Always use pull mode because hardlinks in share mode don't work
             # well. Never update because working copies aren't necessary in
             # share mode.
             clone(ui, peeropts, source, dest=sharepath, pull=True,
                   rev=rev, update=False, stream=stream)

     # Resolve the value to put in [paths] section for the source.
     if islocal(source):
         defaultpath = os.path.abspath(util.urllocalpath(source))
     else:
         defaultpath = source

     sharerepo = repository(ui, path=sharepath)
     share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
           defaultpath=defaultpath)

     # We need to perform a pull against the dest repo to fetch bookmarks
     # and other non-store data that isn't shared by default. In the case of
     # non-existing shared repo, this means we pull from the remote twice. This
     # is a bit weird. But at the time it was implemented, there wasn't an easy
     # way to pull just non-changegroup data.
     destrepo = repository(ui, path=dest)
     exchange.pull(destrepo, srcpeer, heads=revs)

     _postshareupdate(destrepo, update)

     return srcpeer, peer(ui, peeropts, dest)

 # Recomputing branch cache might be slow on big repos,
 # so just copy it
 def _copycache(srcrepo, dstcachedir, fname):
     """copy a cache from srcrepo to destcachedir (if it exists)"""
     srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
     dstbranchcache = os.path.join(dstcachedir, fname)
     if os.path.exists(srcbranchcache):
         if not os.path.exists(dstcachedir):
             os.mkdir(dstcachedir)
         util.copyfile(srcbranchcache, dstbranchcache)

 def _cachetocopy(srcrepo):
     """return the list of cache file valuable to copy during a clone"""
     # In local clones we're copying all nodes, not just served
     # ones. Therefore copy all branch caches over.
     cachefiles = ['branch2']
     cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
     cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
+    cachefiles += ['tags2']
+    cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
+    cachefiles += ['hgtagsfnodes1']
     return cachefiles

 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
           update=True, stream=False, branch=None, shareopts=None):
     """Make a copy of an existing repository.

     Create a copy of an existing repository in a new directory. The
     source and destination are URLs, as passed to the repository
     function. Returns a pair of repository peers, the source and
     newly created destination.

     The location of the source is added to the new repository's
     .hg/hgrc file, as the default to be used for future pulls and
     pushes.

     If an exception is raised, the partly cloned/updated destination
     repository will be deleted.

     Arguments:

     source: repository object or URL

     dest: URL of destination repository to create (defaults to base
     name of source repository)

     pull: always pull from source repository, even in local case or if the
     server prefers streaming

     stream: stream raw data uncompressed from repository (fast over
     LAN, slow over WAN)

     rev: revision to clone up to (implies pull=True)

     update: update working directory after clone completes, if
     destination is local repository (True means update to default rev,
     anything else is treated as a revision)

     branch: branches to clone

     shareopts: dict of options to control auto sharing behavior. The "pool" key
     activates auto sharing mode and defines the directory for stores. The
     "mode" key determines how to construct the directory name of the shared
     repository. "identity" means the name is derived from the node of the first
     changeset in the repository. "remote" means the name is derived from the
     remote's path/URL. Defaults to "identity."
     """

     if isinstance(source, str):
         origsource = ui.expandpath(source)
         source, branch = parseurl(origsource, branch)
         srcpeer = peer(ui, peeropts, source)
     else:
         srcpeer = source.peer() # in case we were called with a localrepo
         branch = (None, branch or [])
         origsource = source = srcpeer.url()
     rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

     if dest is None:
         dest = defaultdest(source)
         if dest:
             ui.status(_("destination directory: %s\n") % dest)
     else:
         dest = ui.expandpath(dest)

     dest = util.urllocalpath(dest)
     source = util.urllocalpath(source)

     if not dest:
         raise error.Abort(_("empty destination path is not valid"))

     destvfs = vfsmod.vfs(dest, expandpath=True)
     if destvfs.lexists():
         if not destvfs.isdir():
             raise error.Abort(_("destination '%s' already exists") % dest)
         elif destvfs.listdir():
             raise error.Abort(_("destination '%s' is not empty") % dest)

     shareopts = shareopts or {}
     sharepool = shareopts.get('pool')
     sharenamemode = shareopts.get('mode')
     if sharepool and islocal(dest):
         sharepath = None
         if sharenamemode == 'identity':
             # Resolve the name from the initial changeset in the remote
             # repository. This returns nullid when the remote is empty. It
             # raises RepoLookupError if revision 0 is filtered or otherwise
             # not available. If we fail to resolve, sharing is not enabled.
             try:
                 rootnode = srcpeer.lookup('0')
                 if rootnode != node.nullid:
                     sharepath = os.path.join(sharepool, node.hex(rootnode))
                 else:
                     ui.status(_('(not using pooled storage: '
                                 'remote appears to be empty)\n'))
             except error.RepoLookupError:
                 ui.status(_('(not using pooled storage: '
                             'unable to resolve identity of remote)\n'))
         elif sharenamemode == 'remote':
             sharepath = os.path.join(
                 sharepool, hashlib.sha1(source).hexdigest())
         else:
             raise error.Abort(_('unknown share naming mode: %s') %
                               sharenamemode)

         if sharepath:
             return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                   dest, pull=pull, rev=rev, update=update,
                                   stream=stream)

     srclock = destlock = cleandir = None
     srcrepo = srcpeer.local()
     try:
         abspath = origsource
         if islocal(origsource):
             abspath = os.path.abspath(util.urllocalpath(origsource))

         if islocal(dest):
             cleandir = dest

         copy = False
         if (srcrepo and srcrepo.cancopy() and islocal(dest)
             and not phases.hassecret(srcrepo)):
             copy = not pull and not rev

         if copy:
             try:
                 # we use a lock here because if we race with commit, we
                 # can end up with extra data in the cloned revlogs that's
                 # not pointed to by changesets, thus causing verify to
                 # fail
                 srclock = srcrepo.lock(wait=False)
             except error.LockError:
                 copy = False

         if copy:
             srcrepo.hook('preoutgoing', throw=True, source='clone')
             hgdir = os.path.realpath(os.path.join(dest, ".hg"))
             if not os.path.exists(dest):
                 os.mkdir(dest)
             else:
                 # only clean up directories we create ourselves
                 cleandir = hgdir
             try:
                 destpath = hgdir
                 util.makedir(destpath, notindexed=True)
             except OSError as inst:
                 if inst.errno == errno.EEXIST:
                     cleandir = None
                     raise error.Abort(_("destination '%s' already exists")
                                       % dest)
                 raise

             destlock = copystore(ui, srcrepo, destpath)
             # copy bookmarks over
             srcbookmarks = srcrepo.vfs.join('bookmarks')
             dstbookmarks = os.path.join(destpath, 'bookmarks')
             if os.path.exists(srcbookmarks):
                 util.copyfile(srcbookmarks, dstbookmarks)

             dstcachedir = os.path.join(destpath, 'cache')
             for cache in _cachetocopy(srcrepo):
                 _copycache(srcrepo, dstcachedir, cache)

             # we need to re-init the repo after manually copying the data
             # into it
             destpeer = peer(srcrepo, peeropts, dest)
             srcrepo.hook('outgoing', source='clone',
                          node=node.hex(node.nullid))
         else:
             try:
                 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                 # only pass ui when no srcrepo
             except OSError as inst:
                 if inst.errno == errno.EEXIST:
                     cleandir = None
                     raise error.Abort(_("destination '%s' already exists")
                                       % dest)
                 raise

             revs = None
             if rev:
                 if not srcpeer.capable('lookup'):
                     raise error.Abort(_("src repository does not support "
                                         "revision lookup and so doesn't "
                                         "support clone by revision"))
                 revs = [srcpeer.lookup(r) for r in rev]
                 checkout = revs[0]
             local = destpeer.local()
             if local:
                 if not stream:
                     if pull:
                         stream = False
                     else:
                         stream = None
                 # internal config: ui.quietbookmarkmove
                 overrides = {('ui', 'quietbookmarkmove'): True}
                 with local.ui.configoverride(overrides, 'clone'):
                     exchange.pull(local, srcpeer, revs,
                                   streamclonerequested=stream)
             elif srcrepo:
                 exchange.push(srcrepo, destpeer, revs=revs,
                               bookmarks=srcrepo._bookmarks.keys())
             else:
                 raise error.Abort(_("clone from remote to remote not supported")
                                  )

         cleandir = None

         destrepo = destpeer.local()
         if destrepo:
             template = uimod.samplehgrcs['cloned']
             fp = destrepo.vfs("hgrc", "w", text=True)
             u = util.url(abspath)
             u.passwd = None
             defaulturl = str(u)
             fp.write(template % defaulturl)
             fp.close()

             destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

             if update:
                 if update is not True:
                     checkout = srcpeer.lookup(update)
                 uprev = None
                 status = None
                 if checkout is not None:
                     try:
                         uprev = destrepo.lookup(checkout)
                     except error.RepoLookupError:
                         if update is not True:
                             try:
                                 uprev = destrepo.lookup(update)
                             except error.RepoLookupError:
                                 pass
                 if uprev is None:
                     try:
                         uprev = destrepo._bookmarks['@']
                         update = '@'
                         bn = destrepo[uprev].branch()
                         if bn == 'default':
                             status = _("updating to bookmark @\n")
                         else:
                             status = (_("updating to bookmark @ on branch %s\n")
                                       % bn)
                     except KeyError:
                         try:
                             uprev = destrepo.branchtip('default')
                         except error.RepoLookupError:
                             uprev = destrepo.lookup('tip')
                 if not status:
                     bn = destrepo[uprev].branch()
                     status = _("updating to branch %s\n") % bn
                 destrepo.ui.status(status)
                 _update(destrepo, uprev)
                 if update in destrepo._bookmarks:
                     bookmarks.activate(destrepo, update)
     finally:
         release(srclock, destlock)
         if cleandir is not None:
             shutil.rmtree(cleandir, True)
         if srcpeer is not None:
             srcpeer.close()
     return srcpeer, destpeer

 def _showstats(repo, stats, quietempty=False):
     if quietempty and not any(stats):
         return
     repo.ui.status(_("%d files updated, %d files merged, "
                      "%d files removed, %d files unresolved\n") % stats)

 def updaterepo(repo, node, overwrite, updatecheck=None):
     """Update the working directory to node.

     When overwrite is set, changes are clobbered, merged else

     returns stats (see pydoc mercurial.merge.applyupdates)"""
     return mergemod.update(repo, node, False, overwrite,
                            labels=['working copy', 'destination'],
                            updatecheck=updatecheck)

 def update(repo, node, quietempty=False, updatecheck=None):
     """update the working directory to node"""
     stats = updaterepo(repo, node, False, updatecheck=updatecheck)
     _showstats(repo, stats, quietempty)
     if stats[3]:
         repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
     return stats[3] > 0

 # naming conflict in clone()
 _update = update

 def clean(repo, node, show_stats=True, quietempty=False):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = updaterepo(repo, node, True)
     repo.vfs.unlinkpath('graftstate', ignoremissing=True)
     if show_stats:
         _showstats(repo, stats, quietempty)
     return stats[3] > 0

 # naming conflict in updatetotally()
 _clean = clean

 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
     """Update the working directory with extra care for non-file components

     This takes care of non-file components below:

     :bookmark: might be advanced or (in)activated

     This takes arguments below:

     :checkout: to which revision the working directory is updated
     :brev: a name, which might be a bookmark to be activated after updating
     :clean: whether changes in the working directory can be discarded
     :updatecheck: how to deal with a dirty working directory

     Valid values for updatecheck are (None => linear):

      * abort: abort if the working directory is dirty
      * none: don't check (merge working directory changes into destination)
      * linear: check that update is linear before merging working directory
        changes into destination
      * noconflict: check that the update does not result in file merges

     This returns whether conflict is detected at updating or not.
     """
     if updatecheck is None:
         updatecheck = ui.config('experimental', 'updatecheck')
         if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
             # If not configured, or invalid value configured
             updatecheck = 'linear'
     with repo.wlock():
         movemarkfrom = None
         warndest = False
         if checkout is None:
             updata = destutil.destupdate(repo, clean=clean)
             checkout, movemarkfrom, brev = updata
             warndest = True

         if clean:
             ret = _clean(repo, checkout)
         else:
             if updatecheck == 'abort':
                 cmdutil.bailifchanged(repo, merge=False)
                 updatecheck = 'none'
             ret = _update(repo, checkout, updatecheck=updatecheck)

         if not ret and movemarkfrom:
             if movemarkfrom == repo['.'].node():
                 pass # no-op update
             elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                 b = ui.label(repo._activebookmark, 'bookmarks.active')
                 ui.status(_("updating bookmark %s\n") % b)
             else:
                 # this can happen with a non-linear update
                 b = ui.label(repo._activebookmark, 'bookmarks')
                 ui.status(_("(leaving bookmark %s)\n") % b)
                 bookmarks.deactivate(repo)
         elif brev in repo._bookmarks:
             if brev != repo._activebookmark:
                 b = ui.label(brev, 'bookmarks.active')
                 ui.status(_("(activating bookmark %s)\n") % b)
             bookmarks.activate(repo, brev)
         elif brev:
             if repo._activebookmark:
                 b = ui.label(repo._activebookmark, 'bookmarks')
                 ui.status(_("(leaving bookmark %s)\n") % b)
             bookmarks.deactivate(repo)

         if warndest:
             destutil.statusotherdests(ui, repo)

     return ret

 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
     """Branch merge with node, resolving changes. Return true if any
     unresolved conflicts."""
     stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                             labels=labels)
     _showstats(repo, stats)
     if stats[3]:
         repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                          "or 'hg update -C .' to abandon\n"))
     elif remind:
         repo.ui.status(_("(branch merge, don't forget to commit)\n"))
     return stats[3] > 0

 def _incoming(displaychlist, subreporecurse, ui, repo, source,
         opts, buffered=False):
     """
     Helper for incoming / gincoming.
     displaychlist gets called with
         (remoterepo, incomingchangesetlist, displayer) parameters,
     and is supposed to contain only code that can't be unified.
     """
     source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
     other = peer(repo, opts, source)
     ui.status(_('comparing with %s\n') % util.hidepassword(source))
     revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

     if revs:
         revs = [other.lookup(rev) for rev in revs]
     other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                 revs, opts["bundle"], opts["force"])
     try:
         if not chlist:
             ui.status(_("no changes found\n"))
             return subreporecurse()
         ui.pager('incoming')
         displayer = cmdutil.show_changeset(ui, other, opts, buffered)
         displaychlist(other, chlist, displayer)
         displayer.close()
     finally:
         cleanupfn()
     subreporecurse()
     return 0 # exit code is zero since we found incoming changes

 def incoming(ui, repo, source, opts):
     def subreporecurse():
         ret = 1
         if opts.get('subrepos'):
             ctx = repo[None]
             for subpath in sorted(ctx.substate):
                 sub = ctx.sub(subpath)
                 ret = min(ret, sub.incoming(ui, source, opts))
         return ret

     def display(other, chlist, displayer):
         limit = cmdutil.loglimit(opts)
         if opts.get('newest_first'):
             chlist.reverse()
         count = 0
         for n in chlist:
             if limit is not None and count >= limit:
                 break
             parents = [p for p in other.changelog.parents(n) if p != nullid]
             if opts.get('no_merges') and len(parents) == 2:
                 continue
             count += 1
             displayer.show(other[n])
     return _incoming(display, subreporecurse, ui, repo, source, opts)

 def _outgoing(ui, repo, dest, opts):
     dest = ui.expandpath(dest or 'default-push', dest or 'default')
     dest, branches = parseurl(dest, opts.get('branch'))
     ui.status(_('comparing with %s\n') % util.hidepassword(dest))
     revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
     if revs:
         revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

     other = peer(repo, opts, dest)
     outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                             force=opts.get('force'))
     o = outgoing.missing
     if not o:
         scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
     return o, other

 def outgoing(ui, repo, dest, opts):
     def recurse():
         ret = 1
         if opts.get('subrepos'):
             ctx = repo[None]
             for subpath in sorted(ctx.substate):
                 sub = ctx.sub(subpath)
                 ret = min(ret, sub.outgoing(ui, dest, opts))
         return ret

     limit = cmdutil.loglimit(opts)
     o, other = _outgoing(ui, repo, dest, opts)
     if not o:
         cmdutil.outgoinghooks(ui, repo, other, opts, o)
         return recurse()

     if opts.get('newest_first'):
         o.reverse()
     ui.pager('outgoing')
     displayer = cmdutil.show_changeset(ui, repo, opts)
     count = 0
     for n in o:
         if limit is not None and count >= limit:
             break
         parents = [p for p in repo.changelog.parents(n) if p != nullid]
         if opts.get('no_merges') and len(parents) == 2:
             continue
         count += 1
         displayer.show(repo[n])
     displayer.close()
     cmdutil.outgoinghooks(ui, repo, other, opts, o)
     recurse()
     return 0 # exit code is zero since we found outgoing changes

922 def verify(repo):
925 def verify(repo):
923 """verify the consistency of a repository"""
926 """verify the consistency of a repository"""
924 ret = verifymod.verify(repo)
927 ret = verifymod.verify(repo)
925
928
926 # Broken subrepo references in hidden csets don't seem worth worrying about,
929 # Broken subrepo references in hidden csets don't seem worth worrying about,
927 # since they can't be pushed/pulled, and --hidden can be used if they are a
930 # since they can't be pushed/pulled, and --hidden can be used if they are a
928 # concern.
931 # concern.
929
932
930 # pathto() is needed for -R case
933 # pathto() is needed for -R case
931 revs = repo.revs("filelog(%s)",
934 revs = repo.revs("filelog(%s)",
932 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
935 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
933
936
934 if revs:
937 if revs:
935 repo.ui.status(_('checking subrepo links\n'))
938 repo.ui.status(_('checking subrepo links\n'))
936 for rev in revs:
939 for rev in revs:
937 ctx = repo[rev]
940 ctx = repo[rev]
938 try:
941 try:
939 for subpath in ctx.substate:
942 for subpath in ctx.substate:
940 try:
943 try:
941 ret = (ctx.sub(subpath, allowcreate=False).verify()
944 ret = (ctx.sub(subpath, allowcreate=False).verify()
942 or ret)
945 or ret)
943 except error.RepoError as e:
946 except error.RepoError as e:
944 repo.ui.warn(('%s: %s\n') % (rev, e))
947 repo.ui.warn(('%s: %s\n') % (rev, e))
945 except Exception:
948 except Exception:
946 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
949 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
947 node.short(ctx.node()))
950 node.short(ctx.node()))
948
951
949 return ret
952 return ret
950
953
951 def remoteui(src, opts):
954 def remoteui(src, opts):
952 'build a remote ui from ui or repo and opts'
955 'build a remote ui from ui or repo and opts'
953 if util.safehasattr(src, 'baseui'): # looks like a repository
956 if util.safehasattr(src, 'baseui'): # looks like a repository
954 dst = src.baseui.copy() # drop repo-specific config
957 dst = src.baseui.copy() # drop repo-specific config
955 src = src.ui # copy target options from repo
958 src = src.ui # copy target options from repo
956 else: # assume it's a global ui object
959 else: # assume it's a global ui object
957 dst = src.copy() # keep all global options
960 dst = src.copy() # keep all global options
958
961
959 # copy ssh-specific options
962 # copy ssh-specific options
960 for o in 'ssh', 'remotecmd':
963 for o in 'ssh', 'remotecmd':
961 v = opts.get(o) or src.config('ui', o)
964 v = opts.get(o) or src.config('ui', o)
962 if v:
965 if v:
963 dst.setconfig("ui", o, v, 'copied')
966 dst.setconfig("ui", o, v, 'copied')
964
967
965 # copy bundle-specific options
968 # copy bundle-specific options
966 r = src.config('bundle', 'mainreporoot')
969 r = src.config('bundle', 'mainreporoot')
967 if r:
970 if r:
968 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
971 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
969
972
970 # copy selected local settings to the remote ui
973 # copy selected local settings to the remote ui
971 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
974 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
972 for key, val in src.configitems(sect):
975 for key, val in src.configitems(sect):
973 dst.setconfig(sect, key, val, 'copied')
976 dst.setconfig(sect, key, val, 'copied')
974 v = src.config('web', 'cacerts')
977 v = src.config('web', 'cacerts')
975 if v:
978 if v:
976 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
979 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
977
980
978 return dst
981 return dst
979
982
980 # Files of interest
983 # Files of interest
981 # Used to check if the repository has changed looking at mtime and size of
984 # Used to check if the repository has changed looking at mtime and size of
982 # these files.
985 # these files.
983 foi = [('spath', '00changelog.i'),
986 foi = [('spath', '00changelog.i'),
984 ('spath', 'phaseroots'), # ! phase can change content at the same size
987 ('spath', 'phaseroots'), # ! phase can change content at the same size
985 ('spath', 'obsstore'),
988 ('spath', 'obsstore'),
986 ('path', 'bookmarks'), # ! bookmark can change content at the same size
989 ('path', 'bookmarks'), # ! bookmark can change content at the same size
987 ]
990 ]
988
991
989 class cachedlocalrepo(object):
992 class cachedlocalrepo(object):
990 """Holds a localrepository that can be cached and reused."""
993 """Holds a localrepository that can be cached and reused."""
991
994
992 def __init__(self, repo):
995 def __init__(self, repo):
993 """Create a new cached repo from an existing repo.
996 """Create a new cached repo from an existing repo.
994
997
995 We assume the passed in repo was recently created. If the
998 We assume the passed in repo was recently created. If the
996 repo has changed between when it was created and when it was
999 repo has changed between when it was created and when it was
997 turned into a cache, it may not refresh properly.
1000 turned into a cache, it may not refresh properly.
998 """
1001 """
999 assert isinstance(repo, localrepo.localrepository)
1002 assert isinstance(repo, localrepo.localrepository)
1000 self._repo = repo
1003 self._repo = repo
1001 self._state, self.mtime = self._repostate()
1004 self._state, self.mtime = self._repostate()
1002 self._filtername = repo.filtername
1005 self._filtername = repo.filtername
1003
1006
1004 def fetch(self):
1007 def fetch(self):
1005 """Refresh (if necessary) and return a repository.
1008 """Refresh (if necessary) and return a repository.
1006
1009
1007 If the cached instance is out of date, it will be recreated
1010 If the cached instance is out of date, it will be recreated
1008 automatically and returned.
1011 automatically and returned.
1009
1012
1010 Returns a tuple of the repo and a boolean indicating whether a new
1013 Returns a tuple of the repo and a boolean indicating whether a new
1011 repo instance was created.
1014 repo instance was created.
1012 """
1015 """
1013 # We compare the mtimes and sizes of some well-known files to
1016 # We compare the mtimes and sizes of some well-known files to
1014 # determine if the repo changed. This is not precise, as mtimes
1017 # determine if the repo changed. This is not precise, as mtimes
1015 # are susceptible to clock skew and imprecise filesystems and
1018 # are susceptible to clock skew and imprecise filesystems and
1016 # file content can change while maintaining the same size.
1019 # file content can change while maintaining the same size.
1017
1020
1018 state, mtime = self._repostate()
1021 state, mtime = self._repostate()
1019 if state == self._state:
1022 if state == self._state:
1020 return self._repo, False
1023 return self._repo, False
1021
1024
1022 repo = repository(self._repo.baseui, self._repo.url())
1025 repo = repository(self._repo.baseui, self._repo.url())
1023 if self._filtername:
1026 if self._filtername:
1024 self._repo = repo.filtered(self._filtername)
1027 self._repo = repo.filtered(self._filtername)
1025 else:
1028 else:
1026 self._repo = repo.unfiltered()
1029 self._repo = repo.unfiltered()
1027 self._state = state
1030 self._state = state
1028 self.mtime = mtime
1031 self.mtime = mtime
1029
1032
1030 return self._repo, True
1033 return self._repo, True
1031
1034
1032 def _repostate(self):
1035 def _repostate(self):
1033 state = []
1036 state = []
1034 maxmtime = -1
1037 maxmtime = -1
1035 for attr, fname in foi:
1038 for attr, fname in foi:
1036 prefix = getattr(self._repo, attr)
1039 prefix = getattr(self._repo, attr)
1037 p = os.path.join(prefix, fname)
1040 p = os.path.join(prefix, fname)
1038 try:
1041 try:
1039 st = os.stat(p)
1042 st = os.stat(p)
1040 except OSError:
1043 except OSError:
1041 st = os.stat(prefix)
1044 st = os.stat(prefix)
1042 state.append((st.st_mtime, st.st_size))
1045 state.append((st.st_mtime, st.st_size))
1043 maxmtime = max(maxmtime, st.st_mtime)
1046 maxmtime = max(maxmtime, st.st_mtime)
1044
1047
1045 return tuple(state), maxmtime
1048 return tuple(state), maxmtime
1046
1049
1047 def copy(self):
1050 def copy(self):
1048 """Obtain a copy of this class instance.
1051 """Obtain a copy of this class instance.
1049
1052
1050 A new localrepository instance is obtained. The new instance should be
1053 A new localrepository instance is obtained. The new instance should be
1051 completely independent of the original.
1054 completely independent of the original.
1052 """
1055 """
1053 repo = repository(self._repo.baseui, self._repo.origroot)
1056 repo = repository(self._repo.baseui, self._repo.origroot)
1054 if self._filtername:
1057 if self._filtername:
1055 repo = repo.filtered(self._filtername)
1058 repo = repo.filtered(self._filtername)
1056 else:
1059 else:
1057 repo = repo.unfiltered()
1060 repo = repo.unfiltered()
1058 c = cachedlocalrepo(repo)
1061 c = cachedlocalrepo(repo)
1059 c._state = self._state
1062 c._state = self._state
1060 c.mtime = self.mtime
1063 c.mtime = self.mtime
1061 return c
1064 return c
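
The foi list and cachedlocalrepo._repostate() above decide whether a cached repository is stale by fingerprinting a handful of files with their (mtime, size) pairs. Below is a minimal standalone sketch of that check, not Mercurial's own code; it assumes the usual .hg/store layout for the 'spath' entries and a caller-supplied repopath.

    import os

    FILES_OF_INTEREST = [
        '.hg/store/00changelog.i',
        '.hg/store/phaseroots',   # phases can change content at the same size
        '.hg/store/obsstore',
        '.hg/bookmarks',          # bookmarks can change content at the same size
    ]

    def repostate(repopath):
        """Fingerprint the files of interest as a tuple of (mtime, size) pairs."""
        state = []
        for relpath in FILES_OF_INTEREST:
            p = os.path.join(repopath, relpath)
            try:
                st = os.stat(p)
            except OSError:
                # like _repostate() above, fall back to the containing directory
                st = os.stat(os.path.dirname(p))
            state.append((st.st_mtime, st.st_size))
        return tuple(state)

    def haschanged(repopath, oldstate):
        # Inexact by design: clock skew, coarse timestamps and same-size
        # content changes can defeat this, which is why the code above
        # describes the comparison as imprecise.
        return repostate(repopath) != oldstate
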
@@ -1,726 +1,734 b''
1 setup
1 setup
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > blackbox=
5 > blackbox=
6 > mock=$TESTDIR/mockblackbox.py
6 > mock=$TESTDIR/mockblackbox.py
7 > EOF
7 > EOF
8
8
9 Helper functions:
9 Helper functions:
10
10
11 $ cacheexists() {
11 $ cacheexists() {
12 > [ -f .hg/cache/tags2-visible ] && echo "tag cache exists" || echo "no tag cache"
12 > [ -f .hg/cache/tags2-visible ] && echo "tag cache exists" || echo "no tag cache"
13 > }
13 > }
14
14
15 $ fnodescacheexists() {
15 $ fnodescacheexists() {
16 > [ -f .hg/cache/hgtagsfnodes1 ] && echo "fnodes cache exists" || echo "no fnodes cache"
16 > [ -f .hg/cache/hgtagsfnodes1 ] && echo "fnodes cache exists" || echo "no fnodes cache"
17 > }
17 > }
18
18
19 $ dumptags() {
19 $ dumptags() {
20 > rev=$1
20 > rev=$1
21 > echo "rev $rev: .hgtags:"
21 > echo "rev $rev: .hgtags:"
22 > hg cat -r$rev .hgtags
22 > hg cat -r$rev .hgtags
23 > }
23 > }
24
24
25 # XXX need to test that the tag cache works when we strip an old head
25 # XXX need to test that the tag cache works when we strip an old head
26 # and add a new one rooted off non-tip: i.e. node and rev of tip are the
26 # and add a new one rooted off non-tip: i.e. node and rev of tip are the
27 # same, but stuff has changed behind tip.
27 # same, but stuff has changed behind tip.
28
28
29 Setup:
29 Setup:
30
30
31 $ hg init t
31 $ hg init t
32 $ cd t
32 $ cd t
33 $ cacheexists
33 $ cacheexists
34 no tag cache
34 no tag cache
35 $ fnodescacheexists
35 $ fnodescacheexists
36 no fnodes cache
36 no fnodes cache
37 $ hg id
37 $ hg id
38 000000000000 tip
38 000000000000 tip
39 $ cacheexists
39 $ cacheexists
40 no tag cache
40 no tag cache
41 $ fnodescacheexists
41 $ fnodescacheexists
42 no fnodes cache
42 no fnodes cache
43 $ echo a > a
43 $ echo a > a
44 $ hg add a
44 $ hg add a
45 $ hg commit -m "test"
45 $ hg commit -m "test"
46 $ hg co
46 $ hg co
47 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
47 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
48 $ hg identify
48 $ hg identify
49 acb14030fe0a tip
49 acb14030fe0a tip
50 $ hg identify -r 'wdir()'
50 $ hg identify -r 'wdir()'
51 acb14030fe0a tip
51 acb14030fe0a tip
52 $ cacheexists
52 $ cacheexists
53 tag cache exists
53 tag cache exists
54 No fnodes cache because .hgtags file doesn't exist
54 No fnodes cache because .hgtags file doesn't exist
55 (this is an implementation detail)
55 (this is an implementation detail)
56 $ fnodescacheexists
56 $ fnodescacheexists
57 no fnodes cache
57 no fnodes cache
58
58
59 Try corrupting the cache
59 Try corrupting the cache
60
60
61 $ printf 'a b' > .hg/cache/tags2-visible
61 $ printf 'a b' > .hg/cache/tags2-visible
62 $ hg identify
62 $ hg identify
63 acb14030fe0a tip
63 acb14030fe0a tip
64 $ cacheexists
64 $ cacheexists
65 tag cache exists
65 tag cache exists
66 $ fnodescacheexists
66 $ fnodescacheexists
67 no fnodes cache
67 no fnodes cache
68 $ hg identify
68 $ hg identify
69 acb14030fe0a tip
69 acb14030fe0a tip
70
70
71 Create local tag with long name:
71 Create local tag with long name:
72
72
73 $ T=`hg identify --debug --id`
73 $ T=`hg identify --debug --id`
74 $ hg tag -l "This is a local tag with a really long name!"
74 $ hg tag -l "This is a local tag with a really long name!"
75 $ hg tags
75 $ hg tags
76 tip 0:acb14030fe0a
76 tip 0:acb14030fe0a
77 This is a local tag with a really long name! 0:acb14030fe0a
77 This is a local tag with a really long name! 0:acb14030fe0a
78 $ rm .hg/localtags
78 $ rm .hg/localtags
79
79
80 Create a tag behind hg's back:
80 Create a tag behind hg's back:
81
81
82 $ echo "$T first" > .hgtags
82 $ echo "$T first" > .hgtags
83 $ cat .hgtags
83 $ cat .hgtags
84 acb14030fe0a21b60322c440ad2d20cf7685a376 first
84 acb14030fe0a21b60322c440ad2d20cf7685a376 first
85 $ hg add .hgtags
85 $ hg add .hgtags
86 $ hg commit -m "add tags"
86 $ hg commit -m "add tags"
87 $ hg tags
87 $ hg tags
88 tip 1:b9154636be93
88 tip 1:b9154636be93
89 first 0:acb14030fe0a
89 first 0:acb14030fe0a
90 $ hg identify
90 $ hg identify
91 b9154636be93 tip
91 b9154636be93 tip
92
92
93 We should have a fnodes cache now that we have a real tag
93 We should have a fnodes cache now that we have a real tag
94 The cache should have an empty entry for rev 0 and a valid entry for rev 1.
94 The cache should have an empty entry for rev 0 and a valid entry for rev 1.
95
95
96
96
97 $ fnodescacheexists
97 $ fnodescacheexists
98 fnodes cache exists
98 fnodes cache exists
99 $ f --size --hexdump .hg/cache/hgtagsfnodes1
99 $ f --size --hexdump .hg/cache/hgtagsfnodes1
100 .hg/cache/hgtagsfnodes1: size=48
100 .hg/cache/hgtagsfnodes1: size=48
101 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
101 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
102 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
102 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
103 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
103 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
104
104
105 Repeat with cold tag cache:
105 Repeat with cold tag cache:
106
106
107 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
107 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
108 $ hg identify
108 $ hg identify
109 b9154636be93 tip
109 b9154636be93 tip
110
110
111 $ fnodescacheexists
111 $ fnodescacheexists
112 fnodes cache exists
112 fnodes cache exists
113 $ f --size --hexdump .hg/cache/hgtagsfnodes1
113 $ f --size --hexdump .hg/cache/hgtagsfnodes1
114 .hg/cache/hgtagsfnodes1: size=48
114 .hg/cache/hgtagsfnodes1: size=48
115 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
115 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
116 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
116 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
117 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
117 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
118
118
119 And again, but now unable to write tag cache or lock file:
119 And again, but now unable to write tag cache or lock file:
120
120
121 #if unix-permissions
121 #if unix-permissions
122 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
122 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
123 $ chmod 555 .hg/cache
123 $ chmod 555 .hg/cache
124 $ hg identify
124 $ hg identify
125 b9154636be93 tip
125 b9154636be93 tip
126 $ chmod 755 .hg/cache
126 $ chmod 755 .hg/cache
127
127
128 $ chmod 555 .hg
128 $ chmod 555 .hg
129 $ hg identify
129 $ hg identify
130 b9154636be93 tip
130 b9154636be93 tip
131 $ chmod 755 .hg
131 $ chmod 755 .hg
132 #endif
132 #endif
133
133
134 Tag cache debug info written to blackbox log
134 Tag cache debug info written to blackbox log
135
135
136 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
136 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
137 $ hg identify
137 $ hg identify
138 b9154636be93 tip
138 b9154636be93 tip
139 $ hg blackbox -l 6
139 $ hg blackbox -l 6
140 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
140 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
141 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing 48 bytes to cache/hgtagsfnodes1
141 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing 48 bytes to cache/hgtagsfnodes1
142 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/1 cache hits/lookups in * seconds (glob)
142 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/1 cache hits/lookups in * seconds (glob)
143 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
143 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
144 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
144 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
145 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
145 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
146
146
147 Failure to acquire lock results in no write
147 Failure to acquire lock results in no write
148
148
149 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
149 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
150 $ echo 'foo:1' > .hg/wlock
150 $ echo 'foo:1' > .hg/wlock
151 $ hg identify
151 $ hg identify
152 b9154636be93 tip
152 b9154636be93 tip
153 $ hg blackbox -l 6
153 $ hg blackbox -l 6
154 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
154 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
155 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> not writing .hg/cache/hgtagsfnodes1 because lock cannot be acquired
155 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> not writing .hg/cache/hgtagsfnodes1 because lock cannot be acquired
156 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/1 cache hits/lookups in * seconds (glob)
156 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/1 cache hits/lookups in * seconds (glob)
157 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
157 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
158 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
158 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
159 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
159 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
160
160
161 $ fnodescacheexists
161 $ fnodescacheexists
162 no fnodes cache
162 no fnodes cache
163
163
164 $ rm .hg/wlock
164 $ rm .hg/wlock
165
165
166 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
166 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
167 $ hg identify
167 $ hg identify
168 b9154636be93 tip
168 b9154636be93 tip
169
169
170 Create a branch:
170 Create a branch:
171
171
172 $ echo bb > a
172 $ echo bb > a
173 $ hg status
173 $ hg status
174 M a
174 M a
175 $ hg identify
175 $ hg identify
176 b9154636be93+ tip
176 b9154636be93+ tip
177 $ hg co first
177 $ hg co first
178 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
178 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
179 $ hg id
179 $ hg id
180 acb14030fe0a+ first
180 acb14030fe0a+ first
181 $ hg id -r 'wdir()'
181 $ hg id -r 'wdir()'
182 acb14030fe0a+ first
182 acb14030fe0a+ first
183 $ hg -v id
183 $ hg -v id
184 acb14030fe0a+ first
184 acb14030fe0a+ first
185 $ hg status
185 $ hg status
186 M a
186 M a
187 $ echo 1 > b
187 $ echo 1 > b
188 $ hg add b
188 $ hg add b
189 $ hg commit -m "branch"
189 $ hg commit -m "branch"
190 created new head
190 created new head
191
191
192 Creating a new commit shouldn't append the .hgtags fnodes cache until
192 Creating a new commit shouldn't append the .hgtags fnodes cache until
193 tags info is accessed
193 tags info is accessed
194
194
195 $ f --size --hexdump .hg/cache/hgtagsfnodes1
195 $ f --size --hexdump .hg/cache/hgtagsfnodes1
196 .hg/cache/hgtagsfnodes1: size=48
196 .hg/cache/hgtagsfnodes1: size=48
197 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
197 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
198 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
198 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
199 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
199 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
200
200
201 $ hg id
201 $ hg id
202 c8edf04160c7 tip
202 c8edf04160c7 tip
203
203
204 First 4 bytes of record 3 are changeset fragment
204 First 4 bytes of record 3 are changeset fragment
205
205
206 $ f --size --hexdump .hg/cache/hgtagsfnodes1
206 $ f --size --hexdump .hg/cache/hgtagsfnodes1
207 .hg/cache/hgtagsfnodes1: size=72
207 .hg/cache/hgtagsfnodes1: size=72
208 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
208 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
209 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
209 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
210 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
210 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
211 0030: c8 ed f0 41 00 00 00 00 00 00 00 00 00 00 00 00 |...A............|
211 0030: c8 ed f0 41 00 00 00 00 00 00 00 00 00 00 00 00 |...A............|
212 0040: 00 00 00 00 00 00 00 00 |........|
212 0040: 00 00 00 00 00 00 00 00 |........|
213
213
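
The hexdumps above exercise the on-disk layout of the hgtagsfnodes1 cache: one 24-byte record per revision, where the first 4 bytes are a fragment of the changeset node (used to validate the entry) and the remaining 20 bytes are the .hgtags filenode, with uncached entries filled with 0xff bytes. A small reader sketch, independent of Mercurial's own implementation, that walks such a file:

    import binascii

    RECORD_SIZE = 24  # 4-byte changeset fragment + 20-byte .hgtags filenode

    def readfnodescache(path='.hg/cache/hgtagsfnodes1'):
        with open(path, 'rb') as fh:
            data = fh.read()
        for rev in range(len(data) // RECORD_SIZE):
            record = data[rev * RECORD_SIZE:(rev + 1) * RECORD_SIZE]
            if record == b'\xff' * RECORD_SIZE:
                print('rev %d: no cached entry' % rev)
                continue
            cset = binascii.hexlify(record[:4]).decode()
            fnode = binascii.hexlify(record[4:]).decode()
            print('rev %d: changeset %s..., .hgtags filenode %s'
                  % (rev, cset, fnode))
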
214 Merge the two heads:
214 Merge the two heads:
215
215
216 $ hg merge 1
216 $ hg merge 1
217 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
217 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
218 (branch merge, don't forget to commit)
218 (branch merge, don't forget to commit)
219 $ hg blackbox -l3
219 $ hg blackbox -l3
220 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28 (5000)> merge 1
220 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28 (5000)> merge 1
221 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> merge 1 exited 0 after * seconds (glob)
221 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> merge 1 exited 0 after * seconds (glob)
222 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l3
222 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l3
223 $ hg id
223 $ hg id
224 c8edf04160c7+b9154636be93+ tip
224 c8edf04160c7+b9154636be93+ tip
225 $ hg status
225 $ hg status
226 M .hgtags
226 M .hgtags
227 $ hg commit -m "merge"
227 $ hg commit -m "merge"
228
228
229 Create a fake head, make sure tag not visible afterwards:
229 Create a fake head, make sure tag not visible afterwards:
230
230
231 $ cp .hgtags tags
231 $ cp .hgtags tags
232 $ hg tag last
232 $ hg tag last
233 $ hg rm .hgtags
233 $ hg rm .hgtags
234 $ hg commit -m "remove"
234 $ hg commit -m "remove"
235
235
236 $ mv tags .hgtags
236 $ mv tags .hgtags
237 $ hg add .hgtags
237 $ hg add .hgtags
238 $ hg commit -m "readd"
238 $ hg commit -m "readd"
239 $
239 $
240 $ hg tags
240 $ hg tags
241 tip 6:35ff301afafe
241 tip 6:35ff301afafe
242 first 0:acb14030fe0a
242 first 0:acb14030fe0a
243
243
244 Add invalid tags:
244 Add invalid tags:
245
245
246 $ echo "spam" >> .hgtags
246 $ echo "spam" >> .hgtags
247 $ echo >> .hgtags
247 $ echo >> .hgtags
248 $ echo "foo bar" >> .hgtags
248 $ echo "foo bar" >> .hgtags
249 $ echo "a5a5 invalid" >> .hg/localtags
249 $ echo "a5a5 invalid" >> .hg/localtags
250 $ cat .hgtags
250 $ cat .hgtags
251 acb14030fe0a21b60322c440ad2d20cf7685a376 first
251 acb14030fe0a21b60322c440ad2d20cf7685a376 first
252 spam
252 spam
253
253
254 foo bar
254 foo bar
255 $ hg commit -m "tags"
255 $ hg commit -m "tags"
256
256
257 Report tag parse error on other head:
257 Report tag parse error on other head:
258
258
259 $ hg up 3
259 $ hg up 3
260 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
260 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
261 $ echo 'x y' >> .hgtags
261 $ echo 'x y' >> .hgtags
262 $ hg commit -m "head"
262 $ hg commit -m "head"
263 created new head
263 created new head
264
264
265 $ hg tags --debug
265 $ hg tags --debug
266 .hgtags@75d9f02dfe28, line 2: cannot parse entry
266 .hgtags@75d9f02dfe28, line 2: cannot parse entry
267 .hgtags@75d9f02dfe28, line 4: node 'foo' is not well formed
267 .hgtags@75d9f02dfe28, line 4: node 'foo' is not well formed
268 .hgtags@c4be69a18c11, line 2: node 'x' is not well formed
268 .hgtags@c4be69a18c11, line 2: node 'x' is not well formed
269 tip 8:c4be69a18c11e8bc3a5fdbb576017c25f7d84663
269 tip 8:c4be69a18c11e8bc3a5fdbb576017c25f7d84663
270 first 0:acb14030fe0a21b60322c440ad2d20cf7685a376
270 first 0:acb14030fe0a21b60322c440ad2d20cf7685a376
271 $ hg tip
271 $ hg tip
272 changeset: 8:c4be69a18c11
272 changeset: 8:c4be69a18c11
273 tag: tip
273 tag: tip
274 parent: 3:ac5e980c4dc0
274 parent: 3:ac5e980c4dc0
275 user: test
275 user: test
276 date: Thu Jan 01 00:00:00 1970 +0000
276 date: Thu Jan 01 00:00:00 1970 +0000
277 summary: head
277 summary: head
278
278
279
279
280 Test tag precedence rules:
280 Test tag precedence rules:
281
281
282 $ cd ..
282 $ cd ..
283 $ hg init t2
283 $ hg init t2
284 $ cd t2
284 $ cd t2
285 $ echo foo > foo
285 $ echo foo > foo
286 $ hg add foo
286 $ hg add foo
287 $ hg ci -m 'add foo' # rev 0
287 $ hg ci -m 'add foo' # rev 0
288 $ hg tag bar # rev 1
288 $ hg tag bar # rev 1
289 $ echo >> foo
289 $ echo >> foo
290 $ hg ci -m 'change foo 1' # rev 2
290 $ hg ci -m 'change foo 1' # rev 2
291 $ hg up -C 1
291 $ hg up -C 1
292 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
292 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
293 $ hg tag -r 1 -f bar # rev 3
293 $ hg tag -r 1 -f bar # rev 3
294 $ hg up -C 1
294 $ hg up -C 1
295 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
295 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
296 $ echo >> foo
296 $ echo >> foo
297 $ hg ci -m 'change foo 2' # rev 4
297 $ hg ci -m 'change foo 2' # rev 4
298 created new head
298 created new head
299 $ hg tags
299 $ hg tags
300 tip 4:0c192d7d5e6b
300 tip 4:0c192d7d5e6b
301 bar 1:78391a272241
301 bar 1:78391a272241
302
302
303 Repeat in case of cache effects:
303 Repeat in case of cache effects:
304
304
305 $ hg tags
305 $ hg tags
306 tip 4:0c192d7d5e6b
306 tip 4:0c192d7d5e6b
307 bar 1:78391a272241
307 bar 1:78391a272241
308
308
309 Detailed dump of tag info:
309 Detailed dump of tag info:
310
310
311 $ hg heads -q # expect 4, 3, 2
311 $ hg heads -q # expect 4, 3, 2
312 4:0c192d7d5e6b
312 4:0c192d7d5e6b
313 3:6fa450212aeb
313 3:6fa450212aeb
314 2:7a94127795a3
314 2:7a94127795a3
315 $ dumptags 2
315 $ dumptags 2
316 rev 2: .hgtags:
316 rev 2: .hgtags:
317 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
317 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
318 $ dumptags 3
318 $ dumptags 3
319 rev 3: .hgtags:
319 rev 3: .hgtags:
320 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
320 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
321 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
321 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
322 78391a272241d70354aa14c874552cad6b51bb42 bar
322 78391a272241d70354aa14c874552cad6b51bb42 bar
323 $ dumptags 4
323 $ dumptags 4
324 rev 4: .hgtags:
324 rev 4: .hgtags:
325 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
325 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
326
326
327 Dump cache:
327 Dump cache:
328
328
329 $ cat .hg/cache/tags2-visible
329 $ cat .hg/cache/tags2-visible
330 4 0c192d7d5e6b78a714de54a2e9627952a877e25a
330 4 0c192d7d5e6b78a714de54a2e9627952a877e25a
331 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
331 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
332 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
332 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
333 78391a272241d70354aa14c874552cad6b51bb42 bar
333 78391a272241d70354aa14c874552cad6b51bb42 bar
334
334
335 $ f --size --hexdump .hg/cache/hgtagsfnodes1
335 $ f --size --hexdump .hg/cache/hgtagsfnodes1
336 .hg/cache/hgtagsfnodes1: size=120
336 .hg/cache/hgtagsfnodes1: size=120
337 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
337 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
338 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
338 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
339 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
339 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
340 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
340 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
341 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
341 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
342 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
342 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
343 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
343 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
344 0070: 78 ee 5a 2d ad bc 94 3d |x.Z-...=|
344 0070: 78 ee 5a 2d ad bc 94 3d |x.Z-...=|
345
345
346 Corrupt the .hgtags fnodes cache
346 Corrupt the .hgtags fnodes cache
347 Extra junk data at the end should get overwritten on next cache update
347 Extra junk data at the end should get overwritten on next cache update
348
348
349 $ echo extra >> .hg/cache/hgtagsfnodes1
349 $ echo extra >> .hg/cache/hgtagsfnodes1
350 $ echo dummy1 > foo
350 $ echo dummy1 > foo
351 $ hg commit -m throwaway1
351 $ hg commit -m throwaway1
352
352
353 $ hg tags
353 $ hg tags
354 tip 5:8dbfe60eff30
354 tip 5:8dbfe60eff30
355 bar 1:78391a272241
355 bar 1:78391a272241
356
356
357 $ hg blackbox -l 6
357 $ hg blackbox -l 6
358 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags
358 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags
359 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing 24 bytes to cache/hgtagsfnodes1
359 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing 24 bytes to cache/hgtagsfnodes1
360 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> 2/3 cache hits/lookups in * seconds (glob)
360 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> 2/3 cache hits/lookups in * seconds (glob)
361 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing .hg/cache/tags2-visible with 1 tags
361 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing .hg/cache/tags2-visible with 1 tags
362 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob)
362 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob)
363 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6
363 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6
364
364
365 #if unix-permissions no-root
365 #if unix-permissions no-root
366 Errors writing to .hgtags fnodes cache are silently ignored
366 Errors writing to .hgtags fnodes cache are silently ignored
367
367
368 $ echo dummy2 > foo
368 $ echo dummy2 > foo
369 $ hg commit -m throwaway2
369 $ hg commit -m throwaway2
370
370
371 $ chmod a-w .hg/cache/hgtagsfnodes1
371 $ chmod a-w .hg/cache/hgtagsfnodes1
372 $ rm -f .hg/cache/tags2-visible
372 $ rm -f .hg/cache/tags2-visible
373
373
374 $ hg tags
374 $ hg tags
375 tip 6:b968051b5cf3
375 tip 6:b968051b5cf3
376 bar 1:78391a272241
376 bar 1:78391a272241
377
377
378 $ hg blackbox -l 6
378 $ hg blackbox -l 6
379 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
379 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
380 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno 13] Permission denied: '$TESTTMP/t2/.hg/cache/hgtagsfnodes1'
380 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno 13] Permission denied: '$TESTTMP/t2/.hg/cache/hgtagsfnodes1'
381 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/3 cache hits/lookups in * seconds (glob)
381 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/3 cache hits/lookups in * seconds (glob)
382 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
382 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
383 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
383 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
384 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
384 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
385
385
386 $ chmod a+w .hg/cache/hgtagsfnodes1
386 $ chmod a+w .hg/cache/hgtagsfnodes1
387
387
388 $ rm -f .hg/cache/tags2-visible
388 $ rm -f .hg/cache/tags2-visible
389 $ hg tags
389 $ hg tags
390 tip 6:b968051b5cf3
390 tip 6:b968051b5cf3
391 bar 1:78391a272241
391 bar 1:78391a272241
392
392
393 $ hg blackbox -l 6
393 $ hg blackbox -l 6
394 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
394 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
395 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1
395 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1
396 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/3 cache hits/lookups in * seconds (glob)
396 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/3 cache hits/lookups in * seconds (glob)
397 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
397 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
398 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
398 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
399 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
399 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
400
400
401 $ f --size .hg/cache/hgtagsfnodes1
401 $ f --size .hg/cache/hgtagsfnodes1
402 .hg/cache/hgtagsfnodes1: size=168
402 .hg/cache/hgtagsfnodes1: size=168
403
403
404 $ hg -q --config extensions.strip= strip -r 6 --no-backup
404 $ hg -q --config extensions.strip= strip -r 6 --no-backup
405 #endif
405 #endif
406
406
407 Stripping doesn't truncate the tags cache until new data is available
407 Stripping doesn't truncate the tags cache until new data is available
408
408
409 $ rm -f .hg/cache/hgtagsfnodes1 .hg/cache/tags2-visible
409 $ rm -f .hg/cache/hgtagsfnodes1 .hg/cache/tags2-visible
410 $ hg tags
410 $ hg tags
411 tip 5:8dbfe60eff30
411 tip 5:8dbfe60eff30
412 bar 1:78391a272241
412 bar 1:78391a272241
413
413
414 $ f --size .hg/cache/hgtagsfnodes1
414 $ f --size .hg/cache/hgtagsfnodes1
415 .hg/cache/hgtagsfnodes1: size=144
415 .hg/cache/hgtagsfnodes1: size=144
416
416
417 $ hg -q --config extensions.strip= strip -r 5 --no-backup
417 $ hg -q --config extensions.strip= strip -r 5 --no-backup
418 $ hg tags
418 $ hg tags
419 tip 4:0c192d7d5e6b
419 tip 4:0c192d7d5e6b
420 bar 1:78391a272241
420 bar 1:78391a272241
421
421
422 $ hg blackbox -l 5
422 $ hg blackbox -l 5
423 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing 24 bytes to cache/hgtagsfnodes1
423 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing 24 bytes to cache/hgtagsfnodes1
424 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> 2/3 cache hits/lookups in * seconds (glob)
424 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> 2/3 cache hits/lookups in * seconds (glob)
425 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing .hg/cache/tags2-visible with 1 tags
425 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing .hg/cache/tags2-visible with 1 tags
426 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> tags exited 0 after * seconds (glob)
426 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> tags exited 0 after * seconds (glob)
427 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> blackbox -l 5
427 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> blackbox -l 5
428
428
429 $ f --size .hg/cache/hgtagsfnodes1
429 $ f --size .hg/cache/hgtagsfnodes1
430 .hg/cache/hgtagsfnodes1: size=120
430 .hg/cache/hgtagsfnodes1: size=120
431
431
432 $ echo dummy > foo
432 $ echo dummy > foo
433 $ hg commit -m throwaway3
433 $ hg commit -m throwaway3
434
434
435 $ hg tags
435 $ hg tags
436 tip 5:035f65efb448
436 tip 5:035f65efb448
437 bar 1:78391a272241
437 bar 1:78391a272241
438
438
439 $ hg blackbox -l 6
439 $ hg blackbox -l 6
440 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags
440 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags
441 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing 24 bytes to cache/hgtagsfnodes1
441 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing 24 bytes to cache/hgtagsfnodes1
442 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> 2/3 cache hits/lookups in * seconds (glob)
442 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> 2/3 cache hits/lookups in * seconds (glob)
443 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing .hg/cache/tags2-visible with 1 tags
443 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing .hg/cache/tags2-visible with 1 tags
444 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags exited 0 after * seconds (glob)
444 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags exited 0 after * seconds (glob)
445 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> blackbox -l 6
445 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> blackbox -l 6
446 $ f --size .hg/cache/hgtagsfnodes1
446 $ f --size .hg/cache/hgtagsfnodes1
447 .hg/cache/hgtagsfnodes1: size=144
447 .hg/cache/hgtagsfnodes1: size=144
448
448
449 $ hg -q --config extensions.strip= strip -r 5 --no-backup
449 $ hg -q --config extensions.strip= strip -r 5 --no-backup
450
450
451 Test tag removal:
451 Test tag removal:
452
452
453 $ hg tag --remove bar # rev 5
453 $ hg tag --remove bar # rev 5
454 $ hg tip -vp
454 $ hg tip -vp
455 changeset: 5:5f6e8655b1c7
455 changeset: 5:5f6e8655b1c7
456 tag: tip
456 tag: tip
457 user: test
457 user: test
458 date: Thu Jan 01 00:00:00 1970 +0000
458 date: Thu Jan 01 00:00:00 1970 +0000
459 files: .hgtags
459 files: .hgtags
460 description:
460 description:
461 Removed tag bar
461 Removed tag bar
462
462
463
463
464 diff -r 0c192d7d5e6b -r 5f6e8655b1c7 .hgtags
464 diff -r 0c192d7d5e6b -r 5f6e8655b1c7 .hgtags
465 --- a/.hgtags Thu Jan 01 00:00:00 1970 +0000
465 --- a/.hgtags Thu Jan 01 00:00:00 1970 +0000
466 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
466 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
467 @@ -1,1 +1,3 @@
467 @@ -1,1 +1,3 @@
468 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
468 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
469 +78391a272241d70354aa14c874552cad6b51bb42 bar
469 +78391a272241d70354aa14c874552cad6b51bb42 bar
470 +0000000000000000000000000000000000000000 bar
470 +0000000000000000000000000000000000000000 bar
471
471
472 $ hg tags
472 $ hg tags
473 tip 5:5f6e8655b1c7
473 tip 5:5f6e8655b1c7
474 $ hg tags # again, try to expose cache bugs
474 $ hg tags # again, try to expose cache bugs
475 tip 5:5f6e8655b1c7
475 tip 5:5f6e8655b1c7
476
476
477 Remove nonexistent tag:
477 Remove nonexistent tag:
478
478
479 $ hg tag --remove foobar
479 $ hg tag --remove foobar
480 abort: tag 'foobar' does not exist
480 abort: tag 'foobar' does not exist
481 [255]
481 [255]
482 $ hg tip
482 $ hg tip
483 changeset: 5:5f6e8655b1c7
483 changeset: 5:5f6e8655b1c7
484 tag: tip
484 tag: tip
485 user: test
485 user: test
486 date: Thu Jan 01 00:00:00 1970 +0000
486 date: Thu Jan 01 00:00:00 1970 +0000
487 summary: Removed tag bar
487 summary: Removed tag bar
488
488
489
489
490 Undo a tag with rollback:
490 Undo a tag with rollback:
491
491
492 $ hg rollback # destroy rev 5 (restore bar)
492 $ hg rollback # destroy rev 5 (restore bar)
493 repository tip rolled back to revision 4 (undo commit)
493 repository tip rolled back to revision 4 (undo commit)
494 working directory now based on revision 4
494 working directory now based on revision 4
495 $ hg tags
495 $ hg tags
496 tip 4:0c192d7d5e6b
496 tip 4:0c192d7d5e6b
497 bar 1:78391a272241
497 bar 1:78391a272241
498 $ hg tags
498 $ hg tags
499 tip 4:0c192d7d5e6b
499 tip 4:0c192d7d5e6b
500 bar 1:78391a272241
500 bar 1:78391a272241
501
501
502 Test tag rank:
502 Test tag rank:
503
503
504 $ cd ..
504 $ cd ..
505 $ hg init t3
505 $ hg init t3
506 $ cd t3
506 $ cd t3
507 $ echo foo > foo
507 $ echo foo > foo
508 $ hg add foo
508 $ hg add foo
509 $ hg ci -m 'add foo' # rev 0
509 $ hg ci -m 'add foo' # rev 0
510 $ hg tag -f bar # rev 1 bar -> 0
510 $ hg tag -f bar # rev 1 bar -> 0
511 $ hg tag -f bar # rev 2 bar -> 1
511 $ hg tag -f bar # rev 2 bar -> 1
512 $ hg tag -fr 0 bar # rev 3 bar -> 0
512 $ hg tag -fr 0 bar # rev 3 bar -> 0
513 $ hg tag -fr 1 bar # rev 4 bar -> 1
513 $ hg tag -fr 1 bar # rev 4 bar -> 1
514 $ hg tag -fr 0 bar # rev 5 bar -> 0
514 $ hg tag -fr 0 bar # rev 5 bar -> 0
515 $ hg tags
515 $ hg tags
516 tip 5:85f05169d91d
516 tip 5:85f05169d91d
517 bar 0:bbd179dfa0a7
517 bar 0:bbd179dfa0a7
518 $ hg co 3
518 $ hg co 3
519 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
519 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
520 $ echo barbar > foo
520 $ echo barbar > foo
521 $ hg ci -m 'change foo' # rev 6
521 $ hg ci -m 'change foo' # rev 6
522 created new head
522 created new head
523 $ hg tags
523 $ hg tags
524 tip 6:735c3ca72986
524 tip 6:735c3ca72986
525 bar 0:bbd179dfa0a7
525 bar 0:bbd179dfa0a7
526
526
527 Don't allow moving tag without -f:
527 Don't allow moving tag without -f:
528
528
529 $ hg tag -r 3 bar
529 $ hg tag -r 3 bar
530 abort: tag 'bar' already exists (use -f to force)
530 abort: tag 'bar' already exists (use -f to force)
531 [255]
531 [255]
532 $ hg tags
532 $ hg tags
533 tip 6:735c3ca72986
533 tip 6:735c3ca72986
534 bar 0:bbd179dfa0a7
534 bar 0:bbd179dfa0a7
535
535
536 Strip 1: expose an old head:
536 Strip 1: expose an old head:
537
537
538 $ hg --config extensions.mq= strip 5
538 $ hg --config extensions.mq= strip 5
539 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
539 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
540 $ hg tags # partly stale cache
540 $ hg tags # partly stale cache
541 tip 5:735c3ca72986
541 tip 5:735c3ca72986
542 bar 1:78391a272241
542 bar 1:78391a272241
543 $ hg tags # up-to-date cache
543 $ hg tags # up-to-date cache
544 tip 5:735c3ca72986
544 tip 5:735c3ca72986
545 bar 1:78391a272241
545 bar 1:78391a272241
546
546
547 Strip 2: destroy whole branch, no old head exposed
547 Strip 2: destroy whole branch, no old head exposed
548
548
549 $ hg --config extensions.mq= strip 4
549 $ hg --config extensions.mq= strip 4
550 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
550 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
551 $ hg tags # partly stale
551 $ hg tags # partly stale
552 tip 4:735c3ca72986
552 tip 4:735c3ca72986
553 bar 0:bbd179dfa0a7
553 bar 0:bbd179dfa0a7
554 $ rm -f .hg/cache/tags2-visible
554 $ rm -f .hg/cache/tags2-visible
555 $ hg tags # cold cache
555 $ hg tags # cold cache
556 tip 4:735c3ca72986
556 tip 4:735c3ca72986
557 bar 0:bbd179dfa0a7
557 bar 0:bbd179dfa0a7
558
558
559 Test tag rank with 3 heads:
559 Test tag rank with 3 heads:
560
560
561 $ cd ..
561 $ cd ..
562 $ hg init t4
562 $ hg init t4
563 $ cd t4
563 $ cd t4
564 $ echo foo > foo
564 $ echo foo > foo
565 $ hg add
565 $ hg add
566 adding foo
566 adding foo
567 $ hg ci -m 'add foo' # rev 0
567 $ hg ci -m 'add foo' # rev 0
568 $ hg tag bar # rev 1 bar -> 0
568 $ hg tag bar # rev 1 bar -> 0
569 $ hg tag -f bar # rev 2 bar -> 1
569 $ hg tag -f bar # rev 2 bar -> 1
570 $ hg up -qC 0
570 $ hg up -qC 0
571 $ hg tag -fr 2 bar # rev 3 bar -> 2
571 $ hg tag -fr 2 bar # rev 3 bar -> 2
572 $ hg tags
572 $ hg tags
573 tip 3:197c21bbbf2c
573 tip 3:197c21bbbf2c
574 bar 2:6fa450212aeb
574 bar 2:6fa450212aeb
575 $ hg up -qC 0
575 $ hg up -qC 0
576 $ hg tag -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
576 $ hg tag -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
577
577
578 Bar should still point to rev 2:
578 Bar should still point to rev 2:
579
579
580 $ hg tags
580 $ hg tags
581 tip 4:3b4b14ed0202
581 tip 4:3b4b14ed0202
582 bar 2:6fa450212aeb
582 bar 2:6fa450212aeb
583
583
584 Test that removing global/local tags does not get confused when trying
584 Test that removing global/local tags does not get confused when trying
585 to remove a tag of type X which actually only exists as a type Y:
585 to remove a tag of type X which actually only exists as a type Y:
586
586
587 $ cd ..
587 $ cd ..
588 $ hg init t5
588 $ hg init t5
589 $ cd t5
589 $ cd t5
590 $ echo foo > foo
590 $ echo foo > foo
591 $ hg add
591 $ hg add
592 adding foo
592 adding foo
593 $ hg ci -m 'add foo' # rev 0
593 $ hg ci -m 'add foo' # rev 0
594
594
595 $ hg tag -r 0 -l localtag
595 $ hg tag -r 0 -l localtag
596 $ hg tag --remove localtag
596 $ hg tag --remove localtag
597 abort: tag 'localtag' is not a global tag
597 abort: tag 'localtag' is not a global tag
598 [255]
598 [255]
599 $
599 $
600 $ hg tag -r 0 globaltag
600 $ hg tag -r 0 globaltag
601 $ hg tag --remove -l globaltag
601 $ hg tag --remove -l globaltag
602 abort: tag 'globaltag' is not a local tag
602 abort: tag 'globaltag' is not a local tag
603 [255]
603 [255]
604 $ hg tags -v
604 $ hg tags -v
605 tip 1:a0b6fe111088
605 tip 1:a0b6fe111088
606 localtag 0:bbd179dfa0a7 local
606 localtag 0:bbd179dfa0a7 local
607 globaltag 0:bbd179dfa0a7
607 globaltag 0:bbd179dfa0a7
608
608
609 Test for issue3911
609 Test for issue3911
610
610
611 $ hg tag -r 0 -l localtag2
611 $ hg tag -r 0 -l localtag2
612 $ hg tag -l --remove localtag2
612 $ hg tag -l --remove localtag2
613 $ hg tags -v
613 $ hg tags -v
614 tip 1:a0b6fe111088
614 tip 1:a0b6fe111088
615 localtag 0:bbd179dfa0a7 local
615 localtag 0:bbd179dfa0a7 local
616 globaltag 0:bbd179dfa0a7
616 globaltag 0:bbd179dfa0a7
617
617
618 $ hg tag -r 1 -f localtag
618 $ hg tag -r 1 -f localtag
619 $ hg tags -v
619 $ hg tags -v
620 tip 2:5c70a037bb37
620 tip 2:5c70a037bb37
621 localtag 1:a0b6fe111088
621 localtag 1:a0b6fe111088
622 globaltag 0:bbd179dfa0a7
622 globaltag 0:bbd179dfa0a7
623
623
624 $ hg tags -v
624 $ hg tags -v
625 tip 2:5c70a037bb37
625 tip 2:5c70a037bb37
626 localtag 1:a0b6fe111088
626 localtag 1:a0b6fe111088
627 globaltag 0:bbd179dfa0a7
627 globaltag 0:bbd179dfa0a7
628
628
629 $ hg tag -r 1 localtag2
629 $ hg tag -r 1 localtag2
630 $ hg tags -v
630 $ hg tags -v
631 tip 3:bbfb8cd42be2
631 tip 3:bbfb8cd42be2
632 localtag2 1:a0b6fe111088
632 localtag2 1:a0b6fe111088
633 localtag 1:a0b6fe111088
633 localtag 1:a0b6fe111088
634 globaltag 0:bbd179dfa0a7
634 globaltag 0:bbd179dfa0a7
635
635
636 $ hg tags -v
636 $ hg tags -v
637 tip 3:bbfb8cd42be2
637 tip 3:bbfb8cd42be2
638 localtag2 1:a0b6fe111088
638 localtag2 1:a0b6fe111088
639 localtag 1:a0b6fe111088
639 localtag 1:a0b6fe111088
640 globaltag 0:bbd179dfa0a7
640 globaltag 0:bbd179dfa0a7
641
641
642 $ cd ..
642 $ cd ..
643
643
644 Create a repository with tags data to test .hgtags fnodes transfer
644 Create a repository with tags data to test .hgtags fnodes transfer
645
645
646 $ hg init tagsserver
646 $ hg init tagsserver
647 $ cd tagsserver
647 $ cd tagsserver
648 $ touch foo
648 $ touch foo
649 $ hg -q commit -A -m initial
649 $ hg -q commit -A -m initial
650 $ hg tag -m 'tag 0.1' 0.1
650 $ hg tag -m 'tag 0.1' 0.1
651 $ echo second > foo
651 $ echo second > foo
652 $ hg commit -m second
652 $ hg commit -m second
653 $ hg tag -m 'tag 0.2' 0.2
653 $ hg tag -m 'tag 0.2' 0.2
654 $ hg tags
654 $ hg tags
655 tip 3:40f0358cb314
655 tip 3:40f0358cb314
656 0.2 2:f63cc8fe54e4
656 0.2 2:f63cc8fe54e4
657 0.1 0:96ee1d7354c4
657 0.1 0:96ee1d7354c4
658 $ cd ..
658 $ cd ..
659
659
660 Cloning should pull down hgtags fnodes mappings and write the cache file
660 Cloning should pull down hgtags fnodes mappings and write the cache file
661
661
662 $ hg clone --pull tagsserver tagsclient
662 $ hg clone --pull tagsserver tagsclient
663 requesting all changes
663 requesting all changes
664 adding changesets
664 adding changesets
665 adding manifests
665 adding manifests
666 adding file changes
666 adding file changes
667 added 4 changesets with 4 changes to 2 files
667 added 4 changesets with 4 changes to 2 files
668 updating to branch default
668 updating to branch default
669 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
669 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
670
670
671 Missing tags2* files means the cache wasn't written through the normal mechanism.
671 Missing tags2* files means the cache wasn't written through the normal mechanism.
672
672
673 $ ls tagsclient/.hg/cache
673 $ ls tagsclient/.hg/cache
674 branch2-base
674 branch2-base
675 checkisexec (execbit !)
675 checkisexec (execbit !)
676 checklink (symlink !)
676 checklink (symlink !)
677 checklink-target (symlink !)
677 checklink-target (symlink !)
678 hgtagsfnodes1
678 hgtagsfnodes1
679
679
680 Cache should contain the head only, even though other nodes have tags data
680 Cache should contain the head only, even though other nodes have tags data
681
681
682 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
682 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
683 tagsclient/.hg/cache/hgtagsfnodes1: size=96
683 tagsclient/.hg/cache/hgtagsfnodes1: size=96
684 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
684 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
685 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
685 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
686 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
686 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
687 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
687 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
688 0040: ff ff ff ff ff ff ff ff 40 f0 35 8c 19 e0 a7 d3 |........@.5.....|
688 0040: ff ff ff ff ff ff ff ff 40 f0 35 8c 19 e0 a7 d3 |........@.5.....|
689 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
689 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
690
690
691 Running hg tags should produce tags2* file and not change cache
691 Running hg tags should produce tags2* file and not change cache
692
692
693 $ hg -R tagsclient tags
693 $ hg -R tagsclient tags
694 tip 3:40f0358cb314
694 tip 3:40f0358cb314
695 0.2 2:f63cc8fe54e4
695 0.2 2:f63cc8fe54e4
696 0.1 0:96ee1d7354c4
696 0.1 0:96ee1d7354c4
697
697
698 $ ls tagsclient/.hg/cache
698 $ ls tagsclient/.hg/cache
699 branch2-base
699 branch2-base
700 checkisexec (execbit !)
700 checkisexec (execbit !)
701 checklink (symlink !)
701 checklink (symlink !)
702 checklink-target (symlink !)
702 checklink-target (symlink !)
703 hgtagsfnodes1
703 hgtagsfnodes1
704 tags2-visible
704 tags2-visible
705
705
706 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
706 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
707 tagsclient/.hg/cache/hgtagsfnodes1: size=96
707 tagsclient/.hg/cache/hgtagsfnodes1: size=96
708 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
708 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
709 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
709 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
710 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
710 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
711 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
711 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
712 0040: ff ff ff ff ff ff ff ff 40 f0 35 8c 19 e0 a7 d3 |........@.5.....|
712 0040: ff ff ff ff ff ff ff ff 40 f0 35 8c 19 e0 a7 d3 |........@.5.....|
713 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
713 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
714
714
715 Check that the bundle includes cache data
715 Check that the bundle includes cache data
716
716
717 $ hg -R tagsclient bundle --all ./test-cache-in-bundle-all-rev.hg
717 $ hg -R tagsclient bundle --all ./test-cache-in-bundle-all-rev.hg
718 4 changesets found
718 4 changesets found
719 $ hg debugbundle ./test-cache-in-bundle-all-rev.hg
719 $ hg debugbundle ./test-cache-in-bundle-all-rev.hg
720 Stream params: sortdict([('Compression', 'BZ')])
720 Stream params: sortdict([('Compression', 'BZ')])
721 changegroup -- "sortdict([('version', '02'), ('nbchanges', '4')])"
721 changegroup -- "sortdict([('version', '02'), ('nbchanges', '4')])"
722 96ee1d7354c4ad7372047672c36a1f561e3a6a4c
722 96ee1d7354c4ad7372047672c36a1f561e3a6a4c
723 c4dab0c2fd337eb9191f80c3024830a4889a8f34
723 c4dab0c2fd337eb9191f80c3024830a4889a8f34
724 f63cc8fe54e4d326f8d692805d70e092f851ddb1
724 f63cc8fe54e4d326f8d692805d70e092f851ddb1
725 40f0358cb314c824a5929ee527308d90e023bc10
725 40f0358cb314c824a5929ee527308d90e023bc10
726 hgtagsfnodes -- 'sortdict()'
726 hgtagsfnodes -- 'sortdict()'
727
728 Check that local clone includes cache data
729
730 $ hg clone tagsclient tags-local-clone
731 updating to branch default
732 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
733 $ (cd tags-local-clone/.hg/cache/; ls -1 tag*)
734 tags2-visible
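
The hunk above is the new coverage added by this change: after a plain local hg clone, the tags cache file should already be present in the clone's .hg/cache without running hg tags first. A rough standalone sketch of the same comparison, assuming the tagsclient and tags-local-clone directories from the test above and checking only cache names that start with 'tags':

    import os

    def tagscaches(repopath):
        """Return the tags-related cache file names under repopath/.hg/cache."""
        cachedir = os.path.join(repopath, '.hg', 'cache')
        if not os.path.isdir(cachedir):
            return set()
        return {name for name in os.listdir(cachedir) if name.startswith('tags')}

    missing = tagscaches('tagsclient') - tagscaches('tags-local-clone')
    print('tag caches missing from the local clone: %s'
          % (sorted(missing) or 'none'))
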