##// END OF EJS Templates
cache: avoid warming the fnodetags cache after clone...
marmoute -
r47985:8be95673 stable
parent child Browse files
Show More
@@ -1,1596 +1,1596 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 nullhex,
19 nullhex,
20 nullid,
20 nullid,
21 short,
21 short,
22 )
22 )
23 from .pycompat import getattr
23 from .pycompat import getattr
24
24
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 bundlerepo,
27 bundlerepo,
28 cacheutil,
28 cacheutil,
29 cmdutil,
29 cmdutil,
30 destutil,
30 destutil,
31 discovery,
31 discovery,
32 error,
32 error,
33 exchange,
33 exchange,
34 extensions,
34 extensions,
35 graphmod,
35 graphmod,
36 httppeer,
36 httppeer,
37 localrepo,
37 localrepo,
38 lock,
38 lock,
39 logcmdutil,
39 logcmdutil,
40 logexchange,
40 logexchange,
41 merge as mergemod,
41 merge as mergemod,
42 mergestate as mergestatemod,
42 mergestate as mergestatemod,
43 narrowspec,
43 narrowspec,
44 phases,
44 phases,
45 requirements,
45 requirements,
46 scmutil,
46 scmutil,
47 sshpeer,
47 sshpeer,
48 statichttprepo,
48 statichttprepo,
49 ui as uimod,
49 ui as uimod,
50 unionrepo,
50 unionrepo,
51 url,
51 url,
52 util,
52 util,
53 verify as verifymod,
53 verify as verifymod,
54 vfs as vfsmod,
54 vfs as vfsmod,
55 )
55 )
56 from .utils import (
56 from .utils import (
57 hashutil,
57 hashutil,
58 stringutil,
58 stringutil,
59 urlutil,
59 urlutil,
60 )
60 )
61
61
62
62
# Convenience alias so callers can release one or more locks in one call.
release = lock.release

# Repository features that may be shared between a share source and the
# repositories sharing from it.
sharedbookmarks = b'bookmarks'
67
67
68
68
def _local(path):
    """Return the repo module to use for a local *path*.

    A path naming a regular file is treated as a bundle (``bundlerepo``);
    anything else is handled by ``localrepo``.
    """
    path = util.expandpath(urlutil.urllocalpath(path))

    try:
        # os.stat() is used directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in Python 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
        isfile = stat.S_ISREG(st.st_mode)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        isfile = False

    return bundlerepo if isfile else localrepo
88
88
89
89
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against *other*, returning ``(revs, checkout)``.

    *branches* is a ``(hashbranch, branches)`` pair as produced by URL
    parsing.  Resolved branch heads are appended to a copy of *revs*; the
    second element of the return value is the node to check out.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to resolve: pass the requested revs straight through
        if revs:
            return revs, revs[0]
        return None, None

    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def lookupbranch(branch):
        # b'.' means the branch of the local working directory
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(node) for node in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not lookupbranch(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    # hashbranch may be a branch name or a raw revision; if it is not a
    # known branch, treat it as a revision identifier.
    if hashbranch and not lookupbranch(hashbranch):
        revs.append(hashbranch)
    return revs, revs[0]
132
132
133
133
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    # Deprecated shim: the implementation moved to urlutil.
    util.nouideprecwarn(
        b'parseurl(...) moved to mercurial.utils.urlutil', b'6.0', stacklevel=2
    )
    return urlutil.parseurl(path, branches=branches)
139
139
140
140
# Map of URL scheme -> module (or factory) implementing repositories/peers
# for that scheme.  Unknown schemes fall back to b'file' in _peerlookup().
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
150
150
151
151
def _peerlookup(path):
    """Return the repo/peer provider for *path* based on its URL scheme."""
    parsed = urlutil.url(path)
    scheme = parsed.scheme or b'file'
    provider = schemes.get(scheme) or schemes[b'file']
    try:
        return provider(path)
    except TypeError:
        # we can't test callable(provider) because it can be an unloaded
        # module that implements __call__
        if not util.safehasattr(provider, b'instance'):
            raise
        return provider
164
164
165
165
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    # repo is a path: ask the scheme handler, which may not support the query
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        return False
174
174
175
175
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), b'rb')
    return url.open(ui, path, sendaccept=sendaccept)
183
183
184
184
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
187
187
188
188
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # prefer the repo's own ui if it has one
    ui = getattr(obj, "ui", ui)
    for fn in presetupfuncs or []:
        fn(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for extname, extmod in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', extname)
            hook = getattr(extmod, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', extname) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b' > reposetup for %s took %s\n', extname, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    # wire peers get their own initialization hooks
    if not obj.local():
        for fn in wirepeersetupfuncs:
            fn(ui, obj)
    return obj
215
215
216
216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer.local()
    if not repo:
        # the path resolved to a remote peer, not a local repository
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer.url())
        )
    return repo.filtered(b'visible')
240
240
241
241
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(rui, path, create, intents=intents, createopts=createopts)
    return obj.peer()
248
248
249
249
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # an empty path (bare host or empty source) yields an empty destination
    return os.path.basename(os.path.normpath(path)) if path else b''
270
270
271
271
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # reuse a previously cached source repo if present
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    sourcedir = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(sourcedir)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # cache for later calls
    return srcrepo
289
289
290
290
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        # source given as a path/URL: open it and resolve any #branch part
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    destrepo = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, destrepo, defaultpath=defaultpath)
    # re-open so the new repo picks up its post-share configuration
    destrepo = repository(ui, dest)
    _postshareupdate(destrepo, update, checkout=checkout)
    return destrepo
337
337
338
338
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    # nothing to copy when the share source has no hgrc
    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # source config goes first so the local config keeps precedence
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
359
359
360
360
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    wctx = newrepo[b'.']
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
407
407
408
408
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    # narrow clones need their narrowspec mirrored into the working copy
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
425
425
426
426
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested checkout first, then fall back to default/tip
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
447
447
448
448
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            copied = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for entry in srcrepo.store.copylist():
                # phase information of a publishing repo is implicit; don't
                # copy its phaseroots
                if srcpublishing and entry.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(entry)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(entry):
                    if entry.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(entry), dstvfs.join(entry), hardlink, progress
                    )
                    copied += n
            if hardlink:
                ui.debug(b"linked %d files\n" % copied)
            else:
                ui.debug(b"copied %d files\n" % copied)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
487
487
488
488
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for revspec in rev:
            with srcpeer.commandexecutor() as executor:
                remoterevs.append(
                    executor.callcommand(
                        b'lookup',
                        {
                            b'key': revspec,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
595
595
596
596
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)

    ``fname`` is the cache file name, looked up inside ``srcrepo``'s
    ``cachevfs``. The destination cache directory is created on demand;
    nothing happens when the source cache file does not exist.
    """
    srcfname = srcrepo.cachevfs.join(fname)
    dstfname = os.path.join(dstcachedir, fname)
    if os.path.exists(srcfname):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcfname, dstfname)
606
606
607
607
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            status = _(b"updating to bookmark %s\n" % update)
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny window where someone could end up writing to the
            # repository before the caches are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affects readers.
            with destrepo.lock():
                destrepo.updatecaches(full=b"post-clone")
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1066
1066
1067
1067
def _showstats(repo, stats, quietempty=False):
    """print a one-line summary of merge/update statistics to the ui

    ``stats`` is an updateresult-like object exposing ``updatedcount``,
    ``mergedcount``, ``removedcount`` and ``unresolvedcount``.  When
    ``quietempty`` is true and all counters are zero, nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    repo.ui.status(
        _(
            b"%d files updated, %d files merged, "
            b"%d files removed, %d files unresolved\n"
        )
        % (
            stats.updatedcount,
            stats.mergedcount,
            stats.removedcount,
            stats.unresolvedcount,
        )
    )
1083
1083
1084
1084
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # deprecated entry point kept for compatibility; it forwards to
    # merge._update() after emitting a deprecation warning
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
1103
1103
1104
1104
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Prints the update statistics and a hint about 'hg resolve' when
    file merges were left unresolved.  Returns True when there are
    unresolved conflicts, False otherwise.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0


# naming conflict in clone()
_update = update
1116
1116
1117
1117
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes

    A clean (forced) update can never leave unresolved conflicts, which
    the assertion below documents.  Always returns False ("no conflicts").
    """
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False


# naming conflict in updatetotally()
_clean = clean
1129
1129
# The set of accepted values for the commands.update.check config /
# ``updatecheck`` argument of updatetotally(); anything else falls back
# to UPDATECHECK_LINEAR.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1136
1136
1137
1137
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit (non-None) argument with a bogus value is a caller bug
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: compute one (and possibly the
            # bookmark to move / activate) from the repo state
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out up-front on a dirty wdir, then fall back to a
                # plain (unchecked) update since nothing can be merged
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination name is not a bookmark: deactivate any active one
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1216
1216
1217
1217
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Merge the working directory with ``ctx``, resolving changes.

    Returns True when unresolved file conflicts remain, False otherwise.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)

    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        hint = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(hint)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1239
1239
1240
1240
def abortmerge(ui, repo):
    """Abandon an in-progress merge, updating the wdir back to one parent."""
    ms = mergestatemod.mergestate.read(repo)
    # a stored mergestate means there were conflicts and records the local
    # side; without one, fall back to the working directory parent
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1254
1254
1255
1255
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed, otherwise the
    value of subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    source, branches = srcs[0]
    if subpath is not None:
        # resolve the subrepo location relative to the parent's source
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    # cleanupfn may be rebound below once a bundle repo takes ownership of
    # the peer; the initial value just closes the peer connection
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1312
1312
1313
1313
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets in ``source`` not present locally; return exit code."""

    def subreporecurse():
        # recurse into subrepos when --subrepos was given; the minimum of
        # the exit codes wins (0 = something incoming somewhere)
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # honor --limit / --newest-first / --no-merges while showing csets
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1341
1341
1342
1342
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute nodes missing from the push destination(s).

    Returns ``(outgoing_revs, others)`` where ``outgoing_revs`` is the union
    of missing nodes over all destinations, sorted by local revision number,
    and ``others`` is the list of still-open peer objects (callers must close
    them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # NOTE(review): ``subpath`` is rebound to a url object here; with
            # several push paths the second iteration re-wraps that object —
            # confirm urlutil.url accepts its own instances
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # successfully queried peers are returned open for the caller
            others.append(other)
        except: # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1381
1381
1382
1382
def _outgoing_recurse(ui, repo, dests, opts):
    """Run ``outgoing`` on every subrepo when --subrepos was requested.

    Returns the minimum exit code seen (1 when nothing was outgoing or the
    option was not given).
    """
    if not opts.get(b'subrepos'):
        return 1
    wctx = repo[None]
    codes = [1]
    for name in sorted(wctx.substate):
        codes.append(wctx.sub(name).outgoing(ui, dests, opts))
    return min(codes)
1391
1391
1392
1392
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()  # mutates the caller's list, matching historic behavior
    if limit is None and not skip_merges:
        # no filtering requested: stream every node through unchanged
        for node in revs:
            yield node
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if skip_merges:
            real_parents = [p for p in cl.parents(node) if p != nullid]
            if len(real_parents) == 2:
                # two non-null parents == merge changeset; skip it
                continue
        emitted += 1
        yield node
1414
1414
1415
1415
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s); return exit code.

    Exit code is 0 when outgoing changes exist (here or in a subrepo),
    1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks run even when nothing is outgoing
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # peers returned by _outgoing() are owned by us; always close them
        for oth in others:
            oth.close()
1447
1447
1448
1448
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies subrepositories referenced from .hgsubstate. Returns the
    (possibly error-flagged) result of verifymod.verify combined with the
    subrepo verification results.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # a single broken subrepo should not stop the scan
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading substate itself blew up: the file is unparsable
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1481
1481
1482
1482
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # a repository: start from its base ui so repo-local config is dropped
        remote = src.baseui.copy()
        src = src.ui
    else:
        # a plain ui object: keep all of its global options
        remote = src.copy()

    # ssh-related settings; command-line options win over config
    for name in (b'ssh', b'remotecmd'):
        value = opts.get(name) or src.config(b'ui', name)
        if value:
            remote.setconfig(b"ui", name, value, b'copied')

    # remember the main repository root for bundle repositories
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        remote.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # forward the auth/security and proxy sections wholesale
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            remote.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        remote.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return remote
1511
1511
1512
1512
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (name of a repo attribute holding a directory path, filename
# inside it) — see cachedlocalrepo._repostate() for how they are stat()ed.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'), # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'), # ! bookmark can change content at the same size
]
1522
1522
1523
1523
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint ((mtime, size) tuples) and newest mtime of the files
        # of interest; fetch() compares against this to detect staleness
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            # fingerprint unchanged: reuse the cached instance
            return self._repo, False

        # stale: open a fresh repository and re-apply the original filter
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (tuple of (mtime, size) per file of interest, max mtime)."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist yet; stat its directory instead
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry the fingerprint over so the copy does not look stale
        c._state = self._state
        c.mtime = self.mtime
        return c
@@ -1,3759 +1,3765 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import functools
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 sha1nodeconstants,
24 sha1nodeconstants,
25 short,
25 short,
26 )
26 )
27 from .pycompat import (
27 from .pycompat import (
28 delattr,
28 delattr,
29 getattr,
29 getattr,
30 )
30 )
31 from . import (
31 from . import (
32 bookmarks,
32 bookmarks,
33 branchmap,
33 branchmap,
34 bundle2,
34 bundle2,
35 bundlecaches,
35 bundlecaches,
36 changegroup,
36 changegroup,
37 color,
37 color,
38 commit,
38 commit,
39 context,
39 context,
40 dirstate,
40 dirstate,
41 dirstateguard,
41 dirstateguard,
42 discovery,
42 discovery,
43 encoding,
43 encoding,
44 error,
44 error,
45 exchange,
45 exchange,
46 extensions,
46 extensions,
47 filelog,
47 filelog,
48 hook,
48 hook,
49 lock as lockmod,
49 lock as lockmod,
50 match as matchmod,
50 match as matchmod,
51 mergestate as mergestatemod,
51 mergestate as mergestatemod,
52 mergeutil,
52 mergeutil,
53 metadata as metadatamod,
53 metadata as metadatamod,
54 namespaces,
54 namespaces,
55 narrowspec,
55 narrowspec,
56 obsolete,
56 obsolete,
57 pathutil,
57 pathutil,
58 phases,
58 phases,
59 pushkey,
59 pushkey,
60 pycompat,
60 pycompat,
61 rcutil,
61 rcutil,
62 repoview,
62 repoview,
63 requirements as requirementsmod,
63 requirements as requirementsmod,
64 revlog,
64 revlog,
65 revset,
65 revset,
66 revsetlang,
66 revsetlang,
67 scmutil,
67 scmutil,
68 sparse,
68 sparse,
69 store as storemod,
69 store as storemod,
70 subrepoutil,
70 subrepoutil,
71 tags as tagsmod,
71 tags as tagsmod,
72 transaction,
72 transaction,
73 txnutil,
73 txnutil,
74 util,
74 util,
75 vfs as vfsmod,
75 vfs as vfsmod,
76 wireprototypes,
76 wireprototypes,
77 )
77 )
78
78
79 from .interfaces import (
79 from .interfaces import (
80 repository,
80 repository,
81 util as interfaceutil,
81 util as interfaceutil,
82 )
82 )
83
83
84 from .utils import (
84 from .utils import (
85 hashutil,
85 hashutil,
86 procutil,
86 procutil,
87 stringutil,
87 stringutil,
88 urlutil,
88 urlutil,
89 )
89 )
90
90
91 from .revlogutils import (
91 from .revlogutils import (
92 concurrency_checker as revlogchecker,
92 concurrency_checker as revlogchecker,
93 constants as revlogconst,
93 constants as revlogconst,
94 )
94 )
95
95
96 release = lockmod.release
96 release = lockmod.release
97 urlerr = util.urlerr
97 urlerr = util.urlerr
98 urlreq = util.urlreq
98 urlreq = util.urlreq
99
99
# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()
104
104
105
105
class _basefilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        # accessed on the class itself: return the descriptor unchanged
        if repo is None:
            return self
        # filtered views share the cache of the unfiltered repo, so every
        # access is proxied through the unfiltered __dict__
        unfi = repo.unfiltered()
        missing = object()
        cached = unfi.__dict__.get(self.sname, missing)
        if cached is not missing:
            return cached
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        # writes likewise always target the unfiltered repo
        return super(_basefilecache, self).set(repo.unfiltered(), value)
122
122
123
123
class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        # Register each tracked file as living in the working vfs ('plain').
        _cachedfiles.update((p, b'plain') for p in paths)

    def join(self, obj, fname):
        # Resolve relative to the repository's .hg/ directory.
        return obj.vfs.join(fname)
134
134
135
135
class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        # The empty location marks store-relative (svfs) paths.
        _cachedfiles.update((p, b'') for p in paths)

    def join(self, obj, fname):
        # Resolve relative to the repository store.
        return obj.sjoin(fname)
146
146
147
147
class mixedrepostorecache(_basefilecache):
    """filecache for a mix files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        # 'plain' entries resolve against the .hg/ vfs, '' against the store;
        # anything else is a programming error.
        if location == b'plain':
            return obj.vfs.join(fname)
        if location == b'':
            return obj.sjoin(fname)
        raise error.ProgrammingError(b'unexpected location: %s' % location)
167
167
168
168
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    entry = repo.unfiltered()._filecache.get(name, None)
    if entry:
        return entry.obj, True
    return None, False
178
178
179
179
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfiltered = repo.unfiltered()
        if unfiltered is not repo:
            # On a filtered view, delegate to the unfiltered repo's
            # attribute so the value is computed and cached there.
            return getattr(unfiltered, self.name)
        return super(unfilteredpropertycache, self).__get__(unfiltered)
188
188
189
189
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # Store directly in the instance dict (bypassing any __setattr__
        # override) so the cached value shadows this descriptor on ``obj``.
        object.__setattr__(obj, self.name, value)
195
195
196
196
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    # A cached property lives directly in the unfiltered repo's __dict__.
    return name in repo.unfiltered().__dict__
200
200
201
201
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""

    @functools.wraps(orig)
    def inner(repo, *args, **kwargs):
        # Swap the (possibly filtered) repo for its unfiltered twin before
        # delegating to the wrapped method.
        return orig(repo.unfiltered(), *args, **kwargs)

    return inner
210
210
211
211
# Wire-protocol capabilities advertised by modern local peers.
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
# Legacy peers additionally support the pre-getbundle changegroupsubset call.
legacycaps = moderncaps.union({b'changegroupsubset'})
221
221
222
222
@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    """Command executor that invokes methods on a local peer synchronously."""

    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        # Refuse further use once the executor has been finalized.
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )
        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        method = getattr(self._peer, pycompat.sysstr(command))

        fut = pycompat.futures.Future()
        try:
            res = method(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(fut, sys.exc_info()[1:])
        else:
            fut.set_result(res)
        return fut

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
267
267
268
268
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        # Always expose the 'served' filtered view, never the raw repo, so
        # secret/hidden changesets stay invisible to this peer.
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        # Returns b'' when the manifest file does not exist (tryread).
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        # Build the bundle content for the requested revision range; the
        # [1] drops the bundle-version element of getbundlechunks' return.
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        # Stream clones only make sense over a wire protocol; a local clone
        # uses hardlinks/copies instead.
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                # Re-raise after salvaged output is flushed; the outer
                # handler translates PushRaced into a ResponseError.
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.
418
418
419
419
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        # A full changegroup is just a subset request rooted at ``nodes``
        # and ending at the repository's current heads.
        return self.changegroupsubset(nodes, self._repo.heads(), source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.
449
449
450
450
# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle that requirements.
featuresetupfuncs = set()
459
459
460
460
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to root of shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    source = hgvfs.read(b'sharedpath').rstrip(b'\n')
    relative = requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    if relative:
        source = util.normpath(hgvfs.join(source))

    sharedvfs = vfsmod.vfs(source, realpath=True)

    if not sharedvfs.exists():
        msg = _(b'.hg/sharedpath points to nonexistent directory %s')
        raise error.RepoError(msg % sharedvfs.base)
    return sharedvfs
484
484
485
485
486 def _readrequires(vfs, allowmissing):
486 def _readrequires(vfs, allowmissing):
487 """reads the require file present at root of this vfs
487 """reads the require file present at root of this vfs
488 and return a set of requirements
488 and return a set of requirements
489
489
490 If allowmissing is True, we suppress ENOENT if raised"""
490 If allowmissing is True, we suppress ENOENT if raised"""
491 # requires file contains a newline-delimited list of
491 # requires file contains a newline-delimited list of
492 # features/capabilities the opener (us) must have in order to use
492 # features/capabilities the opener (us) must have in order to use
493 # the repository. This file was introduced in Mercurial 0.9.2,
493 # the repository. This file was introduced in Mercurial 0.9.2,
494 # which means very old repositories may not have one. We assume
494 # which means very old repositories may not have one. We assume
495 # a missing file translates to no requirements.
495 # a missing file translates to no requirements.
496 try:
496 try:
497 requirements = set(vfs.read(b'requires').splitlines())
497 requirements = set(vfs.read(b'requires').splitlines())
498 except IOError as e:
498 except IOError as e:
499 if not (allowmissing and e.errno == errno.ENOENT):
499 if not (allowmissing and e.errno == errno.ENOENT):
500 raise
500 raise
501 requirements = set()
501 requirements = set()
502 return requirements
502 return requirements
503
503
504
504
505 def makelocalrepository(baseui, path, intents=None):
505 def makelocalrepository(baseui, path, intents=None):
506 """Create a local repository object.
506 """Create a local repository object.
507
507
508 Given arguments needed to construct a local repository, this function
508 Given arguments needed to construct a local repository, this function
509 performs various early repository loading functionality (such as
509 performs various early repository loading functionality (such as
510 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
510 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
511 the repository can be opened, derives a type suitable for representing
511 the repository can be opened, derives a type suitable for representing
512 that repository, and returns an instance of it.
512 that repository, and returns an instance of it.
513
513
514 The returned object conforms to the ``repository.completelocalrepository``
514 The returned object conforms to the ``repository.completelocalrepository``
515 interface.
515 interface.
516
516
517 The repository type is derived by calling a series of factory functions
517 The repository type is derived by calling a series of factory functions
518 for each aspect/interface of the final repository. These are defined by
518 for each aspect/interface of the final repository. These are defined by
519 ``REPO_INTERFACES``.
519 ``REPO_INTERFACES``.
520
520
521 Each factory function is called to produce a type implementing a specific
521 Each factory function is called to produce a type implementing a specific
522 interface. The cumulative list of returned types will be combined into a
522 interface. The cumulative list of returned types will be combined into a
523 new type and that type will be instantiated to represent the local
523 new type and that type will be instantiated to represent the local
524 repository.
524 repository.
525
525
526 The factory functions each receive various state that may be consulted
526 The factory functions each receive various state that may be consulted
527 as part of deriving a type.
527 as part of deriving a type.
528
528
529 Extensions should wrap these factory functions to customize repository type
529 Extensions should wrap these factory functions to customize repository type
530 creation. Note that an extension's wrapped function may be called even if
530 creation. Note that an extension's wrapped function may be called even if
531 that extension is not loaded for the repo being constructed. Extensions
531 that extension is not loaded for the repo being constructed. Extensions
532 should check if their ``__name__`` appears in the
532 should check if their ``__name__`` appears in the
533 ``extensionmodulenames`` set passed to the factory function and no-op if
533 ``extensionmodulenames`` set passed to the factory function and no-op if
534 not.
534 not.
535 """
535 """
536 ui = baseui.copy()
536 ui = baseui.copy()
537 # Prevent copying repo configuration.
537 # Prevent copying repo configuration.
538 ui.copy = baseui.copy
538 ui.copy = baseui.copy
539
539
540 # Working directory VFS rooted at repository root.
540 # Working directory VFS rooted at repository root.
541 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
541 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
542
542
543 # Main VFS for .hg/ directory.
543 # Main VFS for .hg/ directory.
544 hgpath = wdirvfs.join(b'.hg')
544 hgpath = wdirvfs.join(b'.hg')
545 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
545 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
546 # Whether this repository is shared one or not
546 # Whether this repository is shared one or not
547 shared = False
547 shared = False
548 # If this repository is shared, vfs pointing to shared repo
548 # If this repository is shared, vfs pointing to shared repo
549 sharedvfs = None
549 sharedvfs = None
550
550
551 # The .hg/ path should exist and should be a directory. All other
551 # The .hg/ path should exist and should be a directory. All other
552 # cases are errors.
552 # cases are errors.
553 if not hgvfs.isdir():
553 if not hgvfs.isdir():
554 try:
554 try:
555 hgvfs.stat()
555 hgvfs.stat()
556 except OSError as e:
556 except OSError as e:
557 if e.errno != errno.ENOENT:
557 if e.errno != errno.ENOENT:
558 raise
558 raise
559 except ValueError as e:
559 except ValueError as e:
560 # Can be raised on Python 3.8 when path is invalid.
560 # Can be raised on Python 3.8 when path is invalid.
561 raise error.Abort(
561 raise error.Abort(
562 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
562 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
563 )
563 )
564
564
565 raise error.RepoError(_(b'repository %s not found') % path)
565 raise error.RepoError(_(b'repository %s not found') % path)
566
566
567 requirements = _readrequires(hgvfs, True)
567 requirements = _readrequires(hgvfs, True)
568 shared = (
568 shared = (
569 requirementsmod.SHARED_REQUIREMENT in requirements
569 requirementsmod.SHARED_REQUIREMENT in requirements
570 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
570 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
571 )
571 )
572 storevfs = None
572 storevfs = None
573 if shared:
573 if shared:
574 # This is a shared repo
574 # This is a shared repo
575 sharedvfs = _getsharedvfs(hgvfs, requirements)
575 sharedvfs = _getsharedvfs(hgvfs, requirements)
576 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
576 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
577 else:
577 else:
578 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
578 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
579
579
580 # if .hg/requires contains the sharesafe requirement, it means
580 # if .hg/requires contains the sharesafe requirement, it means
581 # there exists a `.hg/store/requires` too and we should read it
581 # there exists a `.hg/store/requires` too and we should read it
582 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
582 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
583 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
583 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
584 # is not present, refer checkrequirementscompat() for that
584 # is not present, refer checkrequirementscompat() for that
585 #
585 #
586 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
586 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
587 # repository was shared the old way. We check the share source .hg/requires
587 # repository was shared the old way. We check the share source .hg/requires
588 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
588 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
589 # to be reshared
589 # to be reshared
590 hint = _(b"see `hg help config.format.use-share-safe` for more information")
590 hint = _(b"see `hg help config.format.use-share-safe` for more information")
591 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
591 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
592
592
593 if (
593 if (
594 shared
594 shared
595 and requirementsmod.SHARESAFE_REQUIREMENT
595 and requirementsmod.SHARESAFE_REQUIREMENT
596 not in _readrequires(sharedvfs, True)
596 not in _readrequires(sharedvfs, True)
597 ):
597 ):
598 mismatch_warn = ui.configbool(
598 mismatch_warn = ui.configbool(
599 b'share', b'safe-mismatch.source-not-safe.warn'
599 b'share', b'safe-mismatch.source-not-safe.warn'
600 )
600 )
601 mismatch_config = ui.config(
601 mismatch_config = ui.config(
602 b'share', b'safe-mismatch.source-not-safe'
602 b'share', b'safe-mismatch.source-not-safe'
603 )
603 )
604 if mismatch_config in (
604 if mismatch_config in (
605 b'downgrade-allow',
605 b'downgrade-allow',
606 b'allow',
606 b'allow',
607 b'downgrade-abort',
607 b'downgrade-abort',
608 ):
608 ):
609 # prevent cyclic import localrepo -> upgrade -> localrepo
609 # prevent cyclic import localrepo -> upgrade -> localrepo
610 from . import upgrade
610 from . import upgrade
611
611
612 upgrade.downgrade_share_to_non_safe(
612 upgrade.downgrade_share_to_non_safe(
613 ui,
613 ui,
614 hgvfs,
614 hgvfs,
615 sharedvfs,
615 sharedvfs,
616 requirements,
616 requirements,
617 mismatch_config,
617 mismatch_config,
618 mismatch_warn,
618 mismatch_warn,
619 )
619 )
620 elif mismatch_config == b'abort':
620 elif mismatch_config == b'abort':
621 raise error.Abort(
621 raise error.Abort(
622 _(b"share source does not support share-safe requirement"),
622 _(b"share source does not support share-safe requirement"),
623 hint=hint,
623 hint=hint,
624 )
624 )
625 else:
625 else:
626 raise error.Abort(
626 raise error.Abort(
627 _(
627 _(
628 b"share-safe mismatch with source.\nUnrecognized"
628 b"share-safe mismatch with source.\nUnrecognized"
629 b" value '%s' of `share.safe-mismatch.source-not-safe`"
629 b" value '%s' of `share.safe-mismatch.source-not-safe`"
630 b" set."
630 b" set."
631 )
631 )
632 % mismatch_config,
632 % mismatch_config,
633 hint=hint,
633 hint=hint,
634 )
634 )
635 else:
635 else:
636 requirements |= _readrequires(storevfs, False)
636 requirements |= _readrequires(storevfs, False)
637 elif shared:
637 elif shared:
638 sourcerequires = _readrequires(sharedvfs, False)
638 sourcerequires = _readrequires(sharedvfs, False)
639 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
639 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
640 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
640 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
641 mismatch_warn = ui.configbool(
641 mismatch_warn = ui.configbool(
642 b'share', b'safe-mismatch.source-safe.warn'
642 b'share', b'safe-mismatch.source-safe.warn'
643 )
643 )
644 if mismatch_config in (
644 if mismatch_config in (
645 b'upgrade-allow',
645 b'upgrade-allow',
646 b'allow',
646 b'allow',
647 b'upgrade-abort',
647 b'upgrade-abort',
648 ):
648 ):
649 # prevent cyclic import localrepo -> upgrade -> localrepo
649 # prevent cyclic import localrepo -> upgrade -> localrepo
650 from . import upgrade
650 from . import upgrade
651
651
652 upgrade.upgrade_share_to_safe(
652 upgrade.upgrade_share_to_safe(
653 ui,
653 ui,
654 hgvfs,
654 hgvfs,
655 storevfs,
655 storevfs,
656 requirements,
656 requirements,
657 mismatch_config,
657 mismatch_config,
658 mismatch_warn,
658 mismatch_warn,
659 )
659 )
660 elif mismatch_config == b'abort':
660 elif mismatch_config == b'abort':
661 raise error.Abort(
661 raise error.Abort(
662 _(
662 _(
663 b'version mismatch: source uses share-safe'
663 b'version mismatch: source uses share-safe'
664 b' functionality while the current share does not'
664 b' functionality while the current share does not'
665 ),
665 ),
666 hint=hint,
666 hint=hint,
667 )
667 )
668 else:
668 else:
669 raise error.Abort(
669 raise error.Abort(
670 _(
670 _(
671 b"share-safe mismatch with source.\nUnrecognized"
671 b"share-safe mismatch with source.\nUnrecognized"
672 b" value '%s' of `share.safe-mismatch.source-safe` set."
672 b" value '%s' of `share.safe-mismatch.source-safe` set."
673 )
673 )
674 % mismatch_config,
674 % mismatch_config,
675 hint=hint,
675 hint=hint,
676 )
676 )
677
677
678 # The .hg/hgrc file may load extensions or contain config options
678 # The .hg/hgrc file may load extensions or contain config options
679 # that influence repository construction. Attempt to load it and
679 # that influence repository construction. Attempt to load it and
680 # process any new extensions that it may have pulled in.
680 # process any new extensions that it may have pulled in.
681 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
681 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
682 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
682 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
683 extensions.loadall(ui)
683 extensions.loadall(ui)
684 extensions.populateui(ui)
684 extensions.populateui(ui)
685
685
686 # Set of module names of extensions loaded for this repository.
686 # Set of module names of extensions loaded for this repository.
687 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
687 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
688
688
689 supportedrequirements = gathersupportedrequirements(ui)
689 supportedrequirements = gathersupportedrequirements(ui)
690
690
691 # We first validate the requirements are known.
691 # We first validate the requirements are known.
692 ensurerequirementsrecognized(requirements, supportedrequirements)
692 ensurerequirementsrecognized(requirements, supportedrequirements)
693
693
694 # Then we validate that the known set is reasonable to use together.
694 # Then we validate that the known set is reasonable to use together.
695 ensurerequirementscompatible(ui, requirements)
695 ensurerequirementscompatible(ui, requirements)
696
696
697 # TODO there are unhandled edge cases related to opening repositories with
697 # TODO there are unhandled edge cases related to opening repositories with
698 # shared storage. If storage is shared, we should also test for requirements
698 # shared storage. If storage is shared, we should also test for requirements
699 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
699 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
700 # that repo, as that repo may load extensions needed to open it. This is a
700 # that repo, as that repo may load extensions needed to open it. This is a
701 # bit complicated because we don't want the other hgrc to overwrite settings
701 # bit complicated because we don't want the other hgrc to overwrite settings
702 # in this hgrc.
702 # in this hgrc.
703 #
703 #
704 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
704 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
705 # file when sharing repos. But if a requirement is added after the share is
705 # file when sharing repos. But if a requirement is added after the share is
706 # performed, thereby introducing a new requirement for the opener, we may
706 # performed, thereby introducing a new requirement for the opener, we may
707 # will not see that and could encounter a run-time error interacting with
707 # will not see that and could encounter a run-time error interacting with
708 # that shared store since it has an unknown-to-us requirement.
708 # that shared store since it has an unknown-to-us requirement.
709
709
710 # At this point, we know we should be capable of opening the repository.
710 # At this point, we know we should be capable of opening the repository.
711 # Now get on with doing that.
711 # Now get on with doing that.
712
712
713 features = set()
713 features = set()
714
714
715 # The "store" part of the repository holds versioned data. How it is
715 # The "store" part of the repository holds versioned data. How it is
716 # accessed is determined by various requirements. If `shared` or
716 # accessed is determined by various requirements. If `shared` or
717 # `relshared` requirements are present, this indicates current repository
717 # `relshared` requirements are present, this indicates current repository
718 # is a share and store exists in path mentioned in `.hg/sharedpath`
718 # is a share and store exists in path mentioned in `.hg/sharedpath`
719 if shared:
719 if shared:
720 storebasepath = sharedvfs.base
720 storebasepath = sharedvfs.base
721 cachepath = sharedvfs.join(b'cache')
721 cachepath = sharedvfs.join(b'cache')
722 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
722 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
723 else:
723 else:
724 storebasepath = hgvfs.base
724 storebasepath = hgvfs.base
725 cachepath = hgvfs.join(b'cache')
725 cachepath = hgvfs.join(b'cache')
726 wcachepath = hgvfs.join(b'wcache')
726 wcachepath = hgvfs.join(b'wcache')
727
727
728 # The store has changed over time and the exact layout is dictated by
728 # The store has changed over time and the exact layout is dictated by
729 # requirements. The store interface abstracts differences across all
729 # requirements. The store interface abstracts differences across all
730 # of them.
730 # of them.
731 store = makestore(
731 store = makestore(
732 requirements,
732 requirements,
733 storebasepath,
733 storebasepath,
734 lambda base: vfsmod.vfs(base, cacheaudited=True),
734 lambda base: vfsmod.vfs(base, cacheaudited=True),
735 )
735 )
736 hgvfs.createmode = store.createmode
736 hgvfs.createmode = store.createmode
737
737
738 storevfs = store.vfs
738 storevfs = store.vfs
739 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
739 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
740
740
741 # The cache vfs is used to manage cache files.
741 # The cache vfs is used to manage cache files.
742 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
742 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
743 cachevfs.createmode = store.createmode
743 cachevfs.createmode = store.createmode
744 # The cache vfs is used to manage cache files related to the working copy
744 # The cache vfs is used to manage cache files related to the working copy
745 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
745 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
746 wcachevfs.createmode = store.createmode
746 wcachevfs.createmode = store.createmode
747
747
748 # Now resolve the type for the repository object. We do this by repeatedly
748 # Now resolve the type for the repository object. We do this by repeatedly
749 # calling a factory function to produces types for specific aspects of the
749 # calling a factory function to produces types for specific aspects of the
750 # repo's operation. The aggregate returned types are used as base classes
750 # repo's operation. The aggregate returned types are used as base classes
751 # for a dynamically-derived type, which will represent our new repository.
751 # for a dynamically-derived type, which will represent our new repository.
752
752
753 bases = []
753 bases = []
754 extrastate = {}
754 extrastate = {}
755
755
756 for iface, fn in REPO_INTERFACES:
756 for iface, fn in REPO_INTERFACES:
757 # We pass all potentially useful state to give extensions tons of
757 # We pass all potentially useful state to give extensions tons of
758 # flexibility.
758 # flexibility.
759 typ = fn()(
759 typ = fn()(
760 ui=ui,
760 ui=ui,
761 intents=intents,
761 intents=intents,
762 requirements=requirements,
762 requirements=requirements,
763 features=features,
763 features=features,
764 wdirvfs=wdirvfs,
764 wdirvfs=wdirvfs,
765 hgvfs=hgvfs,
765 hgvfs=hgvfs,
766 store=store,
766 store=store,
767 storevfs=storevfs,
767 storevfs=storevfs,
768 storeoptions=storevfs.options,
768 storeoptions=storevfs.options,
769 cachevfs=cachevfs,
769 cachevfs=cachevfs,
770 wcachevfs=wcachevfs,
770 wcachevfs=wcachevfs,
771 extensionmodulenames=extensionmodulenames,
771 extensionmodulenames=extensionmodulenames,
772 extrastate=extrastate,
772 extrastate=extrastate,
773 baseclasses=bases,
773 baseclasses=bases,
774 )
774 )
775
775
776 if not isinstance(typ, type):
776 if not isinstance(typ, type):
777 raise error.ProgrammingError(
777 raise error.ProgrammingError(
778 b'unable to construct type for %s' % iface
778 b'unable to construct type for %s' % iface
779 )
779 )
780
780
781 bases.append(typ)
781 bases.append(typ)
782
782
783 # type() allows you to use characters in type names that wouldn't be
783 # type() allows you to use characters in type names that wouldn't be
784 # recognized as Python symbols in source code. We abuse that to add
784 # recognized as Python symbols in source code. We abuse that to add
785 # rich information about our constructed repo.
785 # rich information about our constructed repo.
786 name = pycompat.sysstr(
786 name = pycompat.sysstr(
787 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
787 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
788 )
788 )
789
789
790 cls = type(name, tuple(bases), {})
790 cls = type(name, tuple(bases), {})
791
791
792 return cls(
792 return cls(
793 baseui=baseui,
793 baseui=baseui,
794 ui=ui,
794 ui=ui,
795 origroot=path,
795 origroot=path,
796 wdirvfs=wdirvfs,
796 wdirvfs=wdirvfs,
797 hgvfs=hgvfs,
797 hgvfs=hgvfs,
798 requirements=requirements,
798 requirements=requirements,
799 supportedrequirements=supportedrequirements,
799 supportedrequirements=supportedrequirements,
800 sharedpath=storebasepath,
800 sharedpath=storebasepath,
801 store=store,
801 store=store,
802 cachevfs=cachevfs,
802 cachevfs=cachevfs,
803 wcachevfs=wcachevfs,
803 wcachevfs=wcachevfs,
804 features=features,
804 features=features,
805 intents=intents,
805 intents=intents,
806 )
806 )
807
807
808
808
def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current
    one is a shared one.
    """
    if not rcutil.use_repo_hgrc():
        return False

    loaded = False

    # Build the ordered list of (path, root) candidates to read. The share
    # source's hgrc comes first so that this repository's own .hg/hgrc can
    # override it.
    candidates = []
    if sharedvfs and requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        candidates.append((sharedvfs.join(b'hgrc'), sharedvfs.base))
    candidates.append((hgvfs.join(b'hgrc'), wdirvfs.base))
    candidates.append((hgvfs.join(b'hgrc-not-shared'), wdirvfs.base))

    for path, root in candidates:
        try:
            ui.readconfig(path, root=root)
        except IOError:
            # A missing config file is not an error; keep trying the rest.
            continue
        loaded = True

    return loaded
849
849
850
850
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Extensions to enable automatically when the matching repository
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement in sorted(autoextensions):
        if requirement not in requirements:
            continue

        for extname in autoextensions[requirement]:
            # Respect any explicit user configuration for this extension.
            if not ui.hasconfig(b'extensions', extname):
                ui.setconfig(b'extensions', extname, b'', source=b'autoload')
876
876
877
877
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Seed with every requirement this version of the code supports natively.
    supported = set(localrepository._basesupported)

    # Run ``featuresetupfuncs`` entries contributed by extensions that are
    # actually loaded for this ui instance; they may mutate ``supported``.
    loadedmodules = {mod.__name__ for _n, mod in extensions.extensions(ui)}
    for setupfunc in featuresetupfuncs:
        if setupfunc.__module__ in loadedmodules:
            setupfunc(ui, supported)

    # Derive requirements from the compression engines that are usable for
    # revlog storage on this installation.
    for engname in util.compengines:
        engine = util.compengines[engname]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % engname)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported
900
900
901
901
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    unknown = set()

    for entry in requirements:
        if entry in supported:
            continue

        # An empty entry or one not starting with an alphanumeric character
        # cannot be a legitimate requirement; the requires file is damaged.
        if not entry or not entry[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        unknown.add(entry)

    if unknown:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(unknown)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
931
931
932
932
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    wantssparse = requirementsmod.SPARSE_REQUIREMENT in requirements
    if wantssparse and not sparse.enabled:
        # The repo needs the sparse extension but it is not active.
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )
957
957
958
958
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT not in requirements:
        # Ancient layout without a dedicated store directory.
        return storemod.basicstore(path, vfstype)

    if requirementsmod.FNCACHE_REQUIREMENT in requirements:
        dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
        return storemod.fncachestore(path, vfstype, dotencode)

    return storemod.encodedstore(path, vfstype)
969
969
970
970
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    hasrevlog = (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    )
    if hasrevlog:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:
        # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        if writecopiesto in (b'changeset-only', b'compatibility'):
            options[b'copies-storage'] = b'extra'

    return options
1008
1008
1009
1009
1010 def resolverevlogstorevfsoptions(ui, requirements, features):
1010 def resolverevlogstorevfsoptions(ui, requirements, features):
1011 """Resolve opener options specific to revlogs."""
1011 """Resolve opener options specific to revlogs."""
1012
1012
1013 options = {}
1013 options = {}
1014 options[b'flagprocessors'] = {}
1014 options[b'flagprocessors'] = {}
1015
1015
1016 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1016 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1017 options[b'revlogv1'] = True
1017 options[b'revlogv1'] = True
1018 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1018 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1019 options[b'revlogv2'] = True
1019 options[b'revlogv2'] = True
1020
1020
1021 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1021 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1022 options[b'generaldelta'] = True
1022 options[b'generaldelta'] = True
1023
1023
1024 # experimental config: format.chunkcachesize
1024 # experimental config: format.chunkcachesize
1025 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1025 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1026 if chunkcachesize is not None:
1026 if chunkcachesize is not None:
1027 options[b'chunkcachesize'] = chunkcachesize
1027 options[b'chunkcachesize'] = chunkcachesize
1028
1028
1029 deltabothparents = ui.configbool(
1029 deltabothparents = ui.configbool(
1030 b'storage', b'revlog.optimize-delta-parent-choice'
1030 b'storage', b'revlog.optimize-delta-parent-choice'
1031 )
1031 )
1032 options[b'deltabothparents'] = deltabothparents
1032 options[b'deltabothparents'] = deltabothparents
1033
1033
1034 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1034 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1035 lazydeltabase = False
1035 lazydeltabase = False
1036 if lazydelta:
1036 if lazydelta:
1037 lazydeltabase = ui.configbool(
1037 lazydeltabase = ui.configbool(
1038 b'storage', b'revlog.reuse-external-delta-parent'
1038 b'storage', b'revlog.reuse-external-delta-parent'
1039 )
1039 )
1040 if lazydeltabase is None:
1040 if lazydeltabase is None:
1041 lazydeltabase = not scmutil.gddeltaconfig(ui)
1041 lazydeltabase = not scmutil.gddeltaconfig(ui)
1042 options[b'lazydelta'] = lazydelta
1042 options[b'lazydelta'] = lazydelta
1043 options[b'lazydeltabase'] = lazydeltabase
1043 options[b'lazydeltabase'] = lazydeltabase
1044
1044
1045 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1045 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1046 if 0 <= chainspan:
1046 if 0 <= chainspan:
1047 options[b'maxdeltachainspan'] = chainspan
1047 options[b'maxdeltachainspan'] = chainspan
1048
1048
1049 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1049 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1050 if mmapindexthreshold is not None:
1050 if mmapindexthreshold is not None:
1051 options[b'mmapindexthreshold'] = mmapindexthreshold
1051 options[b'mmapindexthreshold'] = mmapindexthreshold
1052
1052
1053 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1053 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1054 srdensitythres = float(
1054 srdensitythres = float(
1055 ui.config(b'experimental', b'sparse-read.density-threshold')
1055 ui.config(b'experimental', b'sparse-read.density-threshold')
1056 )
1056 )
1057 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1057 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1058 options[b'with-sparse-read'] = withsparseread
1058 options[b'with-sparse-read'] = withsparseread
1059 options[b'sparse-read-density-threshold'] = srdensitythres
1059 options[b'sparse-read-density-threshold'] = srdensitythres
1060 options[b'sparse-read-min-gap-size'] = srmingapsize
1060 options[b'sparse-read-min-gap-size'] = srmingapsize
1061
1061
1062 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1062 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1063 options[b'sparse-revlog'] = sparserevlog
1063 options[b'sparse-revlog'] = sparserevlog
1064 if sparserevlog:
1064 if sparserevlog:
1065 options[b'generaldelta'] = True
1065 options[b'generaldelta'] = True
1066
1066
1067 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
1067 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
1068 options[b'side-data'] = sidedata
1068 options[b'side-data'] = sidedata
1069
1069
1070 maxchainlen = None
1070 maxchainlen = None
1071 if sparserevlog:
1071 if sparserevlog:
1072 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1072 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1073 # experimental config: format.maxchainlen
1073 # experimental config: format.maxchainlen
1074 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1074 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1075 if maxchainlen is not None:
1075 if maxchainlen is not None:
1076 options[b'maxchainlen'] = maxchainlen
1076 options[b'maxchainlen'] = maxchainlen
1077
1077
1078 for r in requirements:
1078 for r in requirements:
1079 # we allow multiple compression engine requirement to co-exist because
1079 # we allow multiple compression engine requirement to co-exist because
1080 # strickly speaking, revlog seems to support mixed compression style.
1080 # strickly speaking, revlog seems to support mixed compression style.
1081 #
1081 #
1082 # The compression used for new entries will be "the last one"
1082 # The compression used for new entries will be "the last one"
1083 prefix = r.startswith
1083 prefix = r.startswith
1084 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1084 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1085 options[b'compengine'] = r.split(b'-', 2)[2]
1085 options[b'compengine'] = r.split(b'-', 2)[2]
1086
1086
1087 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1087 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1088 if options[b'zlib.level'] is not None:
1088 if options[b'zlib.level'] is not None:
1089 if not (0 <= options[b'zlib.level'] <= 9):
1089 if not (0 <= options[b'zlib.level'] <= 9):
1090 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1090 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1091 raise error.Abort(msg % options[b'zlib.level'])
1091 raise error.Abort(msg % options[b'zlib.level'])
1092 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1092 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1093 if options[b'zstd.level'] is not None:
1093 if options[b'zstd.level'] is not None:
1094 if not (0 <= options[b'zstd.level'] <= 22):
1094 if not (0 <= options[b'zstd.level'] <= 22):
1095 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1095 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1096 raise error.Abort(msg % options[b'zstd.level'])
1096 raise error.Abort(msg % options[b'zstd.level'])
1097
1097
1098 if requirementsmod.NARROW_REQUIREMENT in requirements:
1098 if requirementsmod.NARROW_REQUIREMENT in requirements:
1099 options[b'enableellipsis'] = True
1099 options[b'enableellipsis'] = True
1100
1100
1101 if ui.configbool(b'experimental', b'rust.index'):
1101 if ui.configbool(b'experimental', b'rust.index'):
1102 options[b'rust.index'] = True
1102 options[b'rust.index'] = True
1103 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1103 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1104 slow_path = ui.config(
1104 slow_path = ui.config(
1105 b'storage', b'revlog.persistent-nodemap.slow-path'
1105 b'storage', b'revlog.persistent-nodemap.slow-path'
1106 )
1106 )
1107 if slow_path not in (b'allow', b'warn', b'abort'):
1107 if slow_path not in (b'allow', b'warn', b'abort'):
1108 default = ui.config_default(
1108 default = ui.config_default(
1109 b'storage', b'revlog.persistent-nodemap.slow-path'
1109 b'storage', b'revlog.persistent-nodemap.slow-path'
1110 )
1110 )
1111 msg = _(
1111 msg = _(
1112 b'unknown value for config '
1112 b'unknown value for config '
1113 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1113 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1114 )
1114 )
1115 ui.warn(msg % slow_path)
1115 ui.warn(msg % slow_path)
1116 if not ui.quiet:
1116 if not ui.quiet:
1117 ui.warn(_(b'falling back to default value: %s\n') % default)
1117 ui.warn(_(b'falling back to default value: %s\n') % default)
1118 slow_path = default
1118 slow_path = default
1119
1119
1120 msg = _(
1120 msg = _(
1121 b"accessing `persistent-nodemap` repository without associated "
1121 b"accessing `persistent-nodemap` repository without associated "
1122 b"fast implementation."
1122 b"fast implementation."
1123 )
1123 )
1124 hint = _(
1124 hint = _(
1125 b"check `hg help config.format.use-persistent-nodemap` "
1125 b"check `hg help config.format.use-persistent-nodemap` "
1126 b"for details"
1126 b"for details"
1127 )
1127 )
1128 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1128 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1129 if slow_path == b'warn':
1129 if slow_path == b'warn':
1130 msg = b"warning: " + msg + b'\n'
1130 msg = b"warning: " + msg + b'\n'
1131 ui.warn(msg)
1131 ui.warn(msg)
1132 if not ui.quiet:
1132 if not ui.quiet:
1133 hint = b'(' + hint + b')\n'
1133 hint = b'(' + hint + b')\n'
1134 ui.warn(hint)
1134 ui.warn(hint)
1135 if slow_path == b'abort':
1135 if slow_path == b'abort':
1136 raise error.Abort(msg, hint=hint)
1136 raise error.Abort(msg, hint=hint)
1137 options[b'persistent-nodemap'] = True
1137 options[b'persistent-nodemap'] = True
1138 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1138 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1139 options[b'persistent-nodemap.mmap'] = True
1139 options[b'persistent-nodemap.mmap'] = True
1140 if ui.configbool(b'devel', b'persistent-nodemap'):
1140 if ui.configbool(b'devel', b'persistent-nodemap'):
1141 options[b'devel-force-nodemap'] = True
1141 options[b'devel-force-nodemap'] = True
1142
1142
1143 return options
1143 return options
1144
1144
1145
1145
def makemain(**kwargs):
    """Return the type implementing ``ilocalrepositorymain``.

    Keyword arguments are accepted (and ignored) so this factory keeps a
    signature uniform with the other repository-type factories.
    """
    return localrepository
1149
1149
1150
1150
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        """Return the filelog tracking ``path`` (relative to the store)."""
        # A single leading slash is tolerated and stripped.
        if path[:1] == b'/':
            path = path[1:]
        return filelog.filelog(self.svfs, path)
1160
1160
1161
1161
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        """Return the narrow-aware filelog tracking ``path``."""
        # A single leading slash is tolerated and stripped.
        if path[:1] == b'/':
            path = path[1:]
        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1171
1171
1172
1172
def makefilestorage(requirements, features, **kwargs):
    """Select the file-storage class matching ``requirements``.

    Side effect: advertises revlog file storage and stream-clone support
    by adding the corresponding entries to ``features``.
    """
    features.update(
        [
            repository.REPO_FEATURE_REVLOG_FILE_STORAGE,
            repository.REPO_FEATURE_STREAM_CLONE,
        ]
    )
    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    return revlogfilestorage
1182
1182
1183
1183
# Repository interfaces paired with the factory function producing each.
# ``makelocalrepository()`` walks this list in order to iteratively derive
# the final concrete type for a local repository instance.  The factories
# are wrapped in lambdas so this list does not hold direct references,
# letting extensions wrap the module-level functions.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
1193
1193
1194
1194
1195 @interfaceutil.implementer(repository.ilocalrepositorymain)
1195 @interfaceutil.implementer(repository.ilocalrepositorymain)
1196 class localrepository(object):
1196 class localrepository(object):
1197 """Main class for representing local repositories.
1197 """Main class for representing local repositories.
1198
1198
1199 All local repositories are instances of this class.
1199 All local repositories are instances of this class.
1200
1200
1201 Constructed on its own, instances of this class are not usable as
1201 Constructed on its own, instances of this class are not usable as
1202 repository objects. To obtain a usable repository object, call
1202 repository objects. To obtain a usable repository object, call
1203 ``hg.repository()``, ``localrepo.instance()``, or
1203 ``hg.repository()``, ``localrepo.instance()``, or
1204 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1204 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1205 ``instance()`` adds support for creating new repositories.
1205 ``instance()`` adds support for creating new repositories.
1206 ``hg.repository()`` adds more extension integration, including calling
1206 ``hg.repository()`` adds more extension integration, including calling
1207 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1207 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1208 used.
1208 used.
1209 """
1209 """
1210
1210
1211 # obsolete experimental requirements:
1211 # obsolete experimental requirements:
1212 # - manifestv2: An experimental new manifest format that allowed
1212 # - manifestv2: An experimental new manifest format that allowed
1213 # for stem compression of long paths. Experiment ended up not
1213 # for stem compression of long paths. Experiment ended up not
1214 # being successful (repository sizes went up due to worse delta
1214 # being successful (repository sizes went up due to worse delta
1215 # chains), and the code was deleted in 4.6.
1215 # chains), and the code was deleted in 4.6.
1216 supportedformats = {
1216 supportedformats = {
1217 requirementsmod.REVLOGV1_REQUIREMENT,
1217 requirementsmod.REVLOGV1_REQUIREMENT,
1218 requirementsmod.GENERALDELTA_REQUIREMENT,
1218 requirementsmod.GENERALDELTA_REQUIREMENT,
1219 requirementsmod.TREEMANIFEST_REQUIREMENT,
1219 requirementsmod.TREEMANIFEST_REQUIREMENT,
1220 requirementsmod.COPIESSDC_REQUIREMENT,
1220 requirementsmod.COPIESSDC_REQUIREMENT,
1221 requirementsmod.REVLOGV2_REQUIREMENT,
1221 requirementsmod.REVLOGV2_REQUIREMENT,
1222 requirementsmod.SIDEDATA_REQUIREMENT,
1222 requirementsmod.SIDEDATA_REQUIREMENT,
1223 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1223 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1224 requirementsmod.NODEMAP_REQUIREMENT,
1224 requirementsmod.NODEMAP_REQUIREMENT,
1225 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1225 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1226 requirementsmod.SHARESAFE_REQUIREMENT,
1226 requirementsmod.SHARESAFE_REQUIREMENT,
1227 }
1227 }
1228 _basesupported = supportedformats | {
1228 _basesupported = supportedformats | {
1229 requirementsmod.STORE_REQUIREMENT,
1229 requirementsmod.STORE_REQUIREMENT,
1230 requirementsmod.FNCACHE_REQUIREMENT,
1230 requirementsmod.FNCACHE_REQUIREMENT,
1231 requirementsmod.SHARED_REQUIREMENT,
1231 requirementsmod.SHARED_REQUIREMENT,
1232 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1232 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1233 requirementsmod.DOTENCODE_REQUIREMENT,
1233 requirementsmod.DOTENCODE_REQUIREMENT,
1234 requirementsmod.SPARSE_REQUIREMENT,
1234 requirementsmod.SPARSE_REQUIREMENT,
1235 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1235 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1236 }
1236 }
1237
1237
1238 # list of prefix for file which can be written without 'wlock'
1238 # list of prefix for file which can be written without 'wlock'
1239 # Extensions should extend this list when needed
1239 # Extensions should extend this list when needed
1240 _wlockfreeprefix = {
1240 _wlockfreeprefix = {
1241 # We migh consider requiring 'wlock' for the next
1241 # We migh consider requiring 'wlock' for the next
1242 # two, but pretty much all the existing code assume
1242 # two, but pretty much all the existing code assume
1243 # wlock is not needed so we keep them excluded for
1243 # wlock is not needed so we keep them excluded for
1244 # now.
1244 # now.
1245 b'hgrc',
1245 b'hgrc',
1246 b'requires',
1246 b'requires',
1247 # XXX cache is a complicatged business someone
1247 # XXX cache is a complicatged business someone
1248 # should investigate this in depth at some point
1248 # should investigate this in depth at some point
1249 b'cache/',
1249 b'cache/',
1250 # XXX shouldn't be dirstate covered by the wlock?
1250 # XXX shouldn't be dirstate covered by the wlock?
1251 b'dirstate',
1251 b'dirstate',
1252 # XXX bisect was still a bit too messy at the time
1252 # XXX bisect was still a bit too messy at the time
1253 # this changeset was introduced. Someone should fix
1253 # this changeset was introduced. Someone should fix
1254 # the remainig bit and drop this line
1254 # the remainig bit and drop this line
1255 b'bisect.state',
1255 b'bisect.state',
1256 }
1256 }
1257
1257
    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        # Optionally wrap the vfs auditor so devel warnings fire on
        # writes performed without the appropriate lock.
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        metadatamod.set_sidedata_spec_for_repo(self)
1412
1412
1413 def _getvfsward(self, origfunc):
1413 def _getvfsward(self, origfunc):
1414 """build a ward for self.vfs"""
1414 """build a ward for self.vfs"""
1415 rref = weakref.ref(self)
1415 rref = weakref.ref(self)
1416
1416
1417 def checkvfs(path, mode=None):
1417 def checkvfs(path, mode=None):
1418 ret = origfunc(path, mode=mode)
1418 ret = origfunc(path, mode=mode)
1419 repo = rref()
1419 repo = rref()
1420 if (
1420 if (
1421 repo is None
1421 repo is None
1422 or not util.safehasattr(repo, b'_wlockref')
1422 or not util.safehasattr(repo, b'_wlockref')
1423 or not util.safehasattr(repo, b'_lockref')
1423 or not util.safehasattr(repo, b'_lockref')
1424 ):
1424 ):
1425 return
1425 return
1426 if mode in (None, b'r', b'rb'):
1426 if mode in (None, b'r', b'rb'):
1427 return
1427 return
1428 if path.startswith(repo.path):
1428 if path.startswith(repo.path):
1429 # truncate name relative to the repository (.hg)
1429 # truncate name relative to the repository (.hg)
1430 path = path[len(repo.path) + 1 :]
1430 path = path[len(repo.path) + 1 :]
1431 if path.startswith(b'cache/'):
1431 if path.startswith(b'cache/'):
1432 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1432 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1433 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1433 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1434 # path prefixes covered by 'lock'
1434 # path prefixes covered by 'lock'
1435 vfs_path_prefixes = (
1435 vfs_path_prefixes = (
1436 b'journal.',
1436 b'journal.',
1437 b'undo.',
1437 b'undo.',
1438 b'strip-backup/',
1438 b'strip-backup/',
1439 b'cache/',
1439 b'cache/',
1440 )
1440 )
1441 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1441 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1442 if repo._currentlock(repo._lockref) is None:
1442 if repo._currentlock(repo._lockref) is None:
1443 repo.ui.develwarn(
1443 repo.ui.develwarn(
1444 b'write with no lock: "%s"' % path,
1444 b'write with no lock: "%s"' % path,
1445 stacklevel=3,
1445 stacklevel=3,
1446 config=b'check-locks',
1446 config=b'check-locks',
1447 )
1447 )
1448 elif repo._currentlock(repo._wlockref) is None:
1448 elif repo._currentlock(repo._wlockref) is None:
1449 # rest of vfs files are covered by 'wlock'
1449 # rest of vfs files are covered by 'wlock'
1450 #
1450 #
1451 # exclude special files
1451 # exclude special files
1452 for prefix in self._wlockfreeprefix:
1452 for prefix in self._wlockfreeprefix:
1453 if path.startswith(prefix):
1453 if path.startswith(prefix):
1454 return
1454 return
1455 repo.ui.develwarn(
1455 repo.ui.develwarn(
1456 b'write with no wlock: "%s"' % path,
1456 b'write with no wlock: "%s"' % path,
1457 stacklevel=3,
1457 stacklevel=3,
1458 config=b'check-locks',
1458 config=b'check-locks',
1459 )
1459 )
1460 return ret
1460 return ret
1461
1461
1462 return checkvfs
1462 return checkvfs
1463
1463
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs

        The returned wrapper around ``origfunc`` (the store vfs auditor)
        emits a devel warning when store files are written without the
        repository lock.  Only a weak reference to the repository is
        captured, so the ward does not keep the repository alive.
        """
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            # Repo already collected, or too early in setup to check.
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            # Plain reads never require a lock.
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs
1485
1485
    def close(self):
        # Flush dirty in-memory caches to disk before the repo goes away.
        self._writecaches()
1488
1488
1489 def _writecaches(self):
1489 def _writecaches(self):
1490 if self._revbranchcache:
1490 if self._revbranchcache:
1491 self._revbranchcache.write()
1491 self._revbranchcache.write()
1492
1492
1493 def _restrictcapabilities(self, caps):
1493 def _restrictcapabilities(self, caps):
1494 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1494 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1495 caps = set(caps)
1495 caps = set(caps)
1496 capsblob = bundle2.encodecaps(
1496 capsblob = bundle2.encodecaps(
1497 bundle2.getrepocaps(self, role=b'client')
1497 bundle2.getrepocaps(self, role=b'client')
1498 )
1498 )
1499 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1499 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1500 if self.ui.configbool(b'experimental', b'narrow'):
1500 if self.ui.configbool(b'experimental', b'narrow'):
1501 caps.add(wireprototypes.NARROWCAP)
1501 caps.add(wireprototypes.NARROWCAP)
1502 return caps
1502 return caps
1503
1503
1504 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1504 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1505 # self -> auditor -> self._checknested -> self
1505 # self -> auditor -> self._checknested -> self
1506
1506
1507 @property
1507 @property
1508 def auditor(self):
1508 def auditor(self):
1509 # This is only used by context.workingctx.match in order to
1509 # This is only used by context.workingctx.match in order to
1510 # detect files in subrepos.
1510 # detect files in subrepos.
1511 return pathutil.pathauditor(self.root, callback=self._checknested)
1511 return pathutil.pathauditor(self.root, callback=self._checknested)
1512
1512
1513 @property
1513 @property
1514 def nofsauditor(self):
1514 def nofsauditor(self):
1515 # This is only used by context.basectx.match in order to detect
1515 # This is only used by context.basectx.match in order to detect
1516 # files in subrepos.
1516 # files in subrepos.
1517 return pathutil.pathauditor(
1517 return pathutil.pathauditor(
1518 self.root, callback=self._checknested, realfs=False, cached=True
1518 self.root, callback=self._checknested, realfs=False, cached=True
1519 )
1519 )
1520
1520
1521 def _checknested(self, path):
1521 def _checknested(self, path):
1522 """Determine if path is a legal nested repository."""
1522 """Determine if path is a legal nested repository."""
1523 if not path.startswith(self.root):
1523 if not path.startswith(self.root):
1524 return False
1524 return False
1525 subpath = path[len(self.root) + 1 :]
1525 subpath = path[len(self.root) + 1 :]
1526 normsubpath = util.pconvert(subpath)
1526 normsubpath = util.pconvert(subpath)
1527
1527
1528 # XXX: Checking against the current working copy is wrong in
1528 # XXX: Checking against the current working copy is wrong in
1529 # the sense that it can reject things like
1529 # the sense that it can reject things like
1530 #
1530 #
1531 # $ hg cat -r 10 sub/x.txt
1531 # $ hg cat -r 10 sub/x.txt
1532 #
1532 #
1533 # if sub/ is no longer a subrepository in the working copy
1533 # if sub/ is no longer a subrepository in the working copy
1534 # parent revision.
1534 # parent revision.
1535 #
1535 #
1536 # However, it can of course also allow things that would have
1536 # However, it can of course also allow things that would have
1537 # been rejected before, such as the above cat command if sub/
1537 # been rejected before, such as the above cat command if sub/
1538 # is a subrepository now, but was a normal directory before.
1538 # is a subrepository now, but was a normal directory before.
1539 # The old path auditor would have rejected by mistake since it
1539 # The old path auditor would have rejected by mistake since it
1540 # panics when it sees sub/.hg/.
1540 # panics when it sees sub/.hg/.
1541 #
1541 #
1542 # All in all, checking against the working copy seems sensible
1542 # All in all, checking against the working copy seems sensible
1543 # since we want to prevent access to nested repositories on
1543 # since we want to prevent access to nested repositories on
1544 # the filesystem *now*.
1544 # the filesystem *now*.
1545 ctx = self[None]
1545 ctx = self[None]
1546 parts = util.splitpath(subpath)
1546 parts = util.splitpath(subpath)
1547 while parts:
1547 while parts:
1548 prefix = b'/'.join(parts)
1548 prefix = b'/'.join(parts)
1549 if prefix in ctx.substate:
1549 if prefix in ctx.substate:
1550 if prefix == normsubpath:
1550 if prefix == normsubpath:
1551 return True
1551 return True
1552 else:
1552 else:
1553 sub = ctx.sub(prefix)
1553 sub = ctx.sub(prefix)
1554 return sub.checknested(subpath[len(prefix) + 1 :])
1554 return sub.checknested(subpath[len(prefix) + 1 :])
1555 else:
1555 else:
1556 parts.pop()
1556 parts.pop()
1557 return False
1557 return False
1558
1558
    def peer(self):
        # Deliberately not cached: the peer holds the repo, so caching
        # would create a reference cycle.
        return localpeer(self)  # not cached to avoid reference cycle
1561
1561
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo.  On a plain
        repository there is nothing to strip, so this returns ``self``.
        """
        return self
1567
1567
1568 def filtered(self, name, visibilityexceptions=None):
1568 def filtered(self, name, visibilityexceptions=None):
1569 """Return a filtered version of a repository
1569 """Return a filtered version of a repository
1570
1570
1571 The `name` parameter is the identifier of the requested view. This
1571 The `name` parameter is the identifier of the requested view. This
1572 will return a repoview object set "exactly" to the specified view.
1572 will return a repoview object set "exactly" to the specified view.
1573
1573
1574 This function does not apply recursive filtering to a repository. For
1574 This function does not apply recursive filtering to a repository. For
1575 example calling `repo.filtered("served")` will return a repoview using
1575 example calling `repo.filtered("served")` will return a repoview using
1576 the "served" view, regardless of the initial view used by `repo`.
1576 the "served" view, regardless of the initial view used by `repo`.
1577
1577
1578 In other word, there is always only one level of `repoview` "filtering".
1578 In other word, there is always only one level of `repoview` "filtering".
1579 """
1579 """
1580 if self._extrafilterid is not None and b'%' not in name:
1580 if self._extrafilterid is not None and b'%' not in name:
1581 name = name + b'%' + self._extrafilterid
1581 name = name + b'%' + self._extrafilterid
1582
1582
1583 cls = repoview.newtype(self.unfiltered().__class__)
1583 cls = repoview.newtype(self.unfiltered().__class__)
1584 return cls(self, name, visibilityexceptions)
1584 return cls(self, name, visibilityexceptions)
1585
1585
1586 @mixedrepostorecache(
1586 @mixedrepostorecache(
1587 (b'bookmarks', b'plain'),
1587 (b'bookmarks', b'plain'),
1588 (b'bookmarks.current', b'plain'),
1588 (b'bookmarks.current', b'plain'),
1589 (b'bookmarks', b''),
1589 (b'bookmarks', b''),
1590 (b'00changelog.i', b''),
1590 (b'00changelog.i', b''),
1591 )
1591 )
1592 def _bookmarks(self):
1592 def _bookmarks(self):
1593 # Since the multiple files involved in the transaction cannot be
1593 # Since the multiple files involved in the transaction cannot be
1594 # written atomically (with current repository format), there is a race
1594 # written atomically (with current repository format), there is a race
1595 # condition here.
1595 # condition here.
1596 #
1596 #
1597 # 1) changelog content A is read
1597 # 1) changelog content A is read
1598 # 2) outside transaction update changelog to content B
1598 # 2) outside transaction update changelog to content B
1599 # 3) outside transaction update bookmark file referring to content B
1599 # 3) outside transaction update bookmark file referring to content B
1600 # 4) bookmarks file content is read and filtered against changelog-A
1600 # 4) bookmarks file content is read and filtered against changelog-A
1601 #
1601 #
1602 # When this happens, bookmarks against nodes missing from A are dropped.
1602 # When this happens, bookmarks against nodes missing from A are dropped.
1603 #
1603 #
1604 # Having this happening during read is not great, but it become worse
1604 # Having this happening during read is not great, but it become worse
1605 # when this happen during write because the bookmarks to the "unknown"
1605 # when this happen during write because the bookmarks to the "unknown"
1606 # nodes will be dropped for good. However, writes happen within locks.
1606 # nodes will be dropped for good. However, writes happen within locks.
1607 # This locking makes it possible to have a race free consistent read.
1607 # This locking makes it possible to have a race free consistent read.
1608 # For this purpose data read from disc before locking are
1608 # For this purpose data read from disc before locking are
1609 # "invalidated" right after the locks are taken. This invalidations are
1609 # "invalidated" right after the locks are taken. This invalidations are
1610 # "light", the `filecache` mechanism keep the data in memory and will
1610 # "light", the `filecache` mechanism keep the data in memory and will
1611 # reuse them if the underlying files did not changed. Not parsing the
1611 # reuse them if the underlying files did not changed. Not parsing the
1612 # same data multiple times helps performances.
1612 # same data multiple times helps performances.
1613 #
1613 #
1614 # Unfortunately in the case describe above, the files tracked by the
1614 # Unfortunately in the case describe above, the files tracked by the
1615 # bookmarks file cache might not have changed, but the in-memory
1615 # bookmarks file cache might not have changed, but the in-memory
1616 # content is still "wrong" because we used an older changelog content
1616 # content is still "wrong" because we used an older changelog content
1617 # to process the on-disk data. So after locking, the changelog would be
1617 # to process the on-disk data. So after locking, the changelog would be
1618 # refreshed but `_bookmarks` would be preserved.
1618 # refreshed but `_bookmarks` would be preserved.
1619 # Adding `00changelog.i` to the list of tracked file is not
1619 # Adding `00changelog.i` to the list of tracked file is not
1620 # enough, because at the time we build the content for `_bookmarks` in
1620 # enough, because at the time we build the content for `_bookmarks` in
1621 # (4), the changelog file has already diverged from the content used
1621 # (4), the changelog file has already diverged from the content used
1622 # for loading `changelog` in (1)
1622 # for loading `changelog` in (1)
1623 #
1623 #
1624 # To prevent the issue, we force the changelog to be explicitly
1624 # To prevent the issue, we force the changelog to be explicitly
1625 # reloaded while computing `_bookmarks`. The data race can still happen
1625 # reloaded while computing `_bookmarks`. The data race can still happen
1626 # without the lock (with a narrower window), but it would no longer go
1626 # without the lock (with a narrower window), but it would no longer go
1627 # undetected during the lock time refresh.
1627 # undetected during the lock time refresh.
1628 #
1628 #
1629 # The new schedule is as follow
1629 # The new schedule is as follow
1630 #
1630 #
1631 # 1) filecache logic detect that `_bookmarks` needs to be computed
1631 # 1) filecache logic detect that `_bookmarks` needs to be computed
1632 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1632 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1633 # 3) We force `changelog` filecache to be tested
1633 # 3) We force `changelog` filecache to be tested
1634 # 4) cachestat for `changelog` are captured (for changelog)
1634 # 4) cachestat for `changelog` are captured (for changelog)
1635 # 5) `_bookmarks` is computed and cached
1635 # 5) `_bookmarks` is computed and cached
1636 #
1636 #
1637 # The step in (3) ensure we have a changelog at least as recent as the
1637 # The step in (3) ensure we have a changelog at least as recent as the
1638 # cache stat computed in (1). As a result at locking time:
1638 # cache stat computed in (1). As a result at locking time:
1639 # * if the changelog did not changed since (1) -> we can reuse the data
1639 # * if the changelog did not changed since (1) -> we can reuse the data
1640 # * otherwise -> the bookmarks get refreshed.
1640 # * otherwise -> the bookmarks get refreshed.
1641 self._refreshchangelog()
1641 self._refreshchangelog()
1642 return bookmarks.bmstore(self)
1642 return bookmarks.bmstore(self)
1643
1643
1644 def _refreshchangelog(self):
1644 def _refreshchangelog(self):
1645 """make sure the in memory changelog match the on-disk one"""
1645 """make sure the in memory changelog match the on-disk one"""
1646 if 'changelog' in vars(self) and self.currenttransaction() is None:
1646 if 'changelog' in vars(self) and self.currenttransaction() is None:
1647 del self.changelog
1647 del self.changelog
1648
1648
1649 @property
1649 @property
1650 def _activebookmark(self):
1650 def _activebookmark(self):
1651 return self._bookmarks.active
1651 return self._bookmarks.active
1652
1652
1653 # _phasesets depend on changelog. what we need is to call
1653 # _phasesets depend on changelog. what we need is to call
1654 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1654 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1655 # can't be easily expressed in filecache mechanism.
1655 # can't be easily expressed in filecache mechanism.
1656 @storecache(b'phaseroots', b'00changelog.i')
1656 @storecache(b'phaseroots', b'00changelog.i')
1657 def _phasecache(self):
1657 def _phasecache(self):
1658 return phases.phasecache(self, self._phasedefaults)
1658 return phases.phasecache(self, self._phasedefaults)
1659
1659
1660 @storecache(b'obsstore')
1660 @storecache(b'obsstore')
1661 def obsstore(self):
1661 def obsstore(self):
1662 return obsolete.makestore(self.ui, self)
1662 return obsolete.makestore(self.ui, self)
1663
1663
1664 @storecache(b'00changelog.i')
1664 @storecache(b'00changelog.i')
1665 def changelog(self):
1665 def changelog(self):
1666 # load dirstate before changelog to avoid race see issue6303
1666 # load dirstate before changelog to avoid race see issue6303
1667 self.dirstate.prefetch_parents()
1667 self.dirstate.prefetch_parents()
1668 return self.store.changelog(
1668 return self.store.changelog(
1669 txnutil.mayhavepending(self.root),
1669 txnutil.mayhavepending(self.root),
1670 concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
1670 concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
1671 )
1671 )
1672
1672
1673 @storecache(b'00manifest.i')
1673 @storecache(b'00manifest.i')
1674 def manifestlog(self):
1674 def manifestlog(self):
1675 return self.store.manifestlog(self, self._storenarrowmatch)
1675 return self.store.manifestlog(self, self._storenarrowmatch)
1676
1676
1677 @repofilecache(b'dirstate')
1677 @repofilecache(b'dirstate')
1678 def dirstate(self):
1678 def dirstate(self):
1679 return self._makedirstate()
1679 return self._makedirstate()
1680
1680
1681 def _makedirstate(self):
1681 def _makedirstate(self):
1682 """Extension point for wrapping the dirstate per-repo."""
1682 """Extension point for wrapping the dirstate per-repo."""
1683 sparsematchfn = lambda: sparse.matcher(self)
1683 sparsematchfn = lambda: sparse.matcher(self)
1684
1684
1685 return dirstate.dirstate(
1685 return dirstate.dirstate(
1686 self.vfs,
1686 self.vfs,
1687 self.ui,
1687 self.ui,
1688 self.root,
1688 self.root,
1689 self._dirstatevalidate,
1689 self._dirstatevalidate,
1690 sparsematchfn,
1690 sparsematchfn,
1691 self.nodeconstants,
1691 self.nodeconstants,
1692 )
1692 )
1693
1693
1694 def _dirstatevalidate(self, node):
1694 def _dirstatevalidate(self, node):
1695 try:
1695 try:
1696 self.changelog.rev(node)
1696 self.changelog.rev(node)
1697 return node
1697 return node
1698 except error.LookupError:
1698 except error.LookupError:
1699 if not self._dirstatevalidatewarned:
1699 if not self._dirstatevalidatewarned:
1700 self._dirstatevalidatewarned = True
1700 self._dirstatevalidatewarned = True
1701 self.ui.warn(
1701 self.ui.warn(
1702 _(b"warning: ignoring unknown working parent %s!\n")
1702 _(b"warning: ignoring unknown working parent %s!\n")
1703 % short(node)
1703 % short(node)
1704 )
1704 )
1705 return nullid
1705 return nullid
1706
1706
1707 @storecache(narrowspec.FILENAME)
1707 @storecache(narrowspec.FILENAME)
1708 def narrowpats(self):
1708 def narrowpats(self):
1709 """matcher patterns for this repository's narrowspec
1709 """matcher patterns for this repository's narrowspec
1710
1710
1711 A tuple of (includes, excludes).
1711 A tuple of (includes, excludes).
1712 """
1712 """
1713 return narrowspec.load(self)
1713 return narrowspec.load(self)
1714
1714
1715 @storecache(narrowspec.FILENAME)
1715 @storecache(narrowspec.FILENAME)
1716 def _storenarrowmatch(self):
1716 def _storenarrowmatch(self):
1717 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1717 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1718 return matchmod.always()
1718 return matchmod.always()
1719 include, exclude = self.narrowpats
1719 include, exclude = self.narrowpats
1720 return narrowspec.match(self.root, include=include, exclude=exclude)
1720 return narrowspec.match(self.root, include=include, exclude=exclude)
1721
1721
1722 @storecache(narrowspec.FILENAME)
1722 @storecache(narrowspec.FILENAME)
1723 def _narrowmatch(self):
1723 def _narrowmatch(self):
1724 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1724 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1725 return matchmod.always()
1725 return matchmod.always()
1726 narrowspec.checkworkingcopynarrowspec(self)
1726 narrowspec.checkworkingcopynarrowspec(self)
1727 include, exclude = self.narrowpats
1727 include, exclude = self.narrowpats
1728 return narrowspec.match(self.root, include=include, exclude=exclude)
1728 return narrowspec.match(self.root, include=include, exclude=exclude)
1729
1729
1730 def narrowmatch(self, match=None, includeexact=False):
1730 def narrowmatch(self, match=None, includeexact=False):
1731 """matcher corresponding the the repo's narrowspec
1731 """matcher corresponding the the repo's narrowspec
1732
1732
1733 If `match` is given, then that will be intersected with the narrow
1733 If `match` is given, then that will be intersected with the narrow
1734 matcher.
1734 matcher.
1735
1735
1736 If `includeexact` is True, then any exact matches from `match` will
1736 If `includeexact` is True, then any exact matches from `match` will
1737 be included even if they're outside the narrowspec.
1737 be included even if they're outside the narrowspec.
1738 """
1738 """
1739 if match:
1739 if match:
1740 if includeexact and not self._narrowmatch.always():
1740 if includeexact and not self._narrowmatch.always():
1741 # do not exclude explicitly-specified paths so that they can
1741 # do not exclude explicitly-specified paths so that they can
1742 # be warned later on
1742 # be warned later on
1743 em = matchmod.exact(match.files())
1743 em = matchmod.exact(match.files())
1744 nm = matchmod.unionmatcher([self._narrowmatch, em])
1744 nm = matchmod.unionmatcher([self._narrowmatch, em])
1745 return matchmod.intersectmatchers(match, nm)
1745 return matchmod.intersectmatchers(match, nm)
1746 return matchmod.intersectmatchers(match, self._narrowmatch)
1746 return matchmod.intersectmatchers(match, self._narrowmatch)
1747 return self._narrowmatch
1747 return self._narrowmatch
1748
1748
1749 def setnarrowpats(self, newincludes, newexcludes):
1749 def setnarrowpats(self, newincludes, newexcludes):
1750 narrowspec.save(self, newincludes, newexcludes)
1750 narrowspec.save(self, newincludes, newexcludes)
1751 self.invalidate(clearfilecache=True)
1751 self.invalidate(clearfilecache=True)
1752
1752
1753 @unfilteredpropertycache
1753 @unfilteredpropertycache
1754 def _quick_access_changeid_null(self):
1754 def _quick_access_changeid_null(self):
1755 return {
1755 return {
1756 b'null': (nullrev, nullid),
1756 b'null': (nullrev, nullid),
1757 nullrev: (nullrev, nullid),
1757 nullrev: (nullrev, nullid),
1758 nullid: (nullrev, nullid),
1758 nullid: (nullrev, nullid),
1759 }
1759 }
1760
1760
1761 @unfilteredpropertycache
1761 @unfilteredpropertycache
1762 def _quick_access_changeid_wc(self):
1762 def _quick_access_changeid_wc(self):
1763 # also fast path access to the working copy parents
1763 # also fast path access to the working copy parents
1764 # however, only do it for filter that ensure wc is visible.
1764 # however, only do it for filter that ensure wc is visible.
1765 quick = self._quick_access_changeid_null.copy()
1765 quick = self._quick_access_changeid_null.copy()
1766 cl = self.unfiltered().changelog
1766 cl = self.unfiltered().changelog
1767 for node in self.dirstate.parents():
1767 for node in self.dirstate.parents():
1768 if node == nullid:
1768 if node == nullid:
1769 continue
1769 continue
1770 rev = cl.index.get_rev(node)
1770 rev = cl.index.get_rev(node)
1771 if rev is None:
1771 if rev is None:
1772 # unknown working copy parent case:
1772 # unknown working copy parent case:
1773 #
1773 #
1774 # skip the fast path and let higher code deal with it
1774 # skip the fast path and let higher code deal with it
1775 continue
1775 continue
1776 pair = (rev, node)
1776 pair = (rev, node)
1777 quick[rev] = pair
1777 quick[rev] = pair
1778 quick[node] = pair
1778 quick[node] = pair
1779 # also add the parents of the parents
1779 # also add the parents of the parents
1780 for r in cl.parentrevs(rev):
1780 for r in cl.parentrevs(rev):
1781 if r == nullrev:
1781 if r == nullrev:
1782 continue
1782 continue
1783 n = cl.node(r)
1783 n = cl.node(r)
1784 pair = (r, n)
1784 pair = (r, n)
1785 quick[r] = pair
1785 quick[r] = pair
1786 quick[n] = pair
1786 quick[n] = pair
1787 p1node = self.dirstate.p1()
1787 p1node = self.dirstate.p1()
1788 if p1node != nullid:
1788 if p1node != nullid:
1789 quick[b'.'] = quick[p1node]
1789 quick[b'.'] = quick[p1node]
1790 return quick
1790 return quick
1791
1791
1792 @unfilteredmethod
1792 @unfilteredmethod
1793 def _quick_access_changeid_invalidate(self):
1793 def _quick_access_changeid_invalidate(self):
1794 if '_quick_access_changeid_wc' in vars(self):
1794 if '_quick_access_changeid_wc' in vars(self):
1795 del self.__dict__['_quick_access_changeid_wc']
1795 del self.__dict__['_quick_access_changeid_wc']
1796
1796
1797 @property
1797 @property
1798 def _quick_access_changeid(self):
1798 def _quick_access_changeid(self):
1799 """an helper dictionnary for __getitem__ calls
1799 """an helper dictionnary for __getitem__ calls
1800
1800
1801 This contains a list of symbol we can recognise right away without
1801 This contains a list of symbol we can recognise right away without
1802 further processing.
1802 further processing.
1803 """
1803 """
1804 if self.filtername in repoview.filter_has_wc:
1804 if self.filtername in repoview.filter_has_wc:
1805 return self._quick_access_changeid_wc
1805 return self._quick_access_changeid_wc
1806 return self._quick_access_changeid_null
1806 return self._quick_access_changeid_null
1807
1807
1808 def __getitem__(self, changeid):
1808 def __getitem__(self, changeid):
1809 # dealing with special cases
1809 # dealing with special cases
1810 if changeid is None:
1810 if changeid is None:
1811 return context.workingctx(self)
1811 return context.workingctx(self)
1812 if isinstance(changeid, context.basectx):
1812 if isinstance(changeid, context.basectx):
1813 return changeid
1813 return changeid
1814
1814
1815 # dealing with multiple revisions
1815 # dealing with multiple revisions
1816 if isinstance(changeid, slice):
1816 if isinstance(changeid, slice):
1817 # wdirrev isn't contiguous so the slice shouldn't include it
1817 # wdirrev isn't contiguous so the slice shouldn't include it
1818 return [
1818 return [
1819 self[i]
1819 self[i]
1820 for i in pycompat.xrange(*changeid.indices(len(self)))
1820 for i in pycompat.xrange(*changeid.indices(len(self)))
1821 if i not in self.changelog.filteredrevs
1821 if i not in self.changelog.filteredrevs
1822 ]
1822 ]
1823
1823
1824 # dealing with some special values
1824 # dealing with some special values
1825 quick_access = self._quick_access_changeid.get(changeid)
1825 quick_access = self._quick_access_changeid.get(changeid)
1826 if quick_access is not None:
1826 if quick_access is not None:
1827 rev, node = quick_access
1827 rev, node = quick_access
1828 return context.changectx(self, rev, node, maybe_filtered=False)
1828 return context.changectx(self, rev, node, maybe_filtered=False)
1829 if changeid == b'tip':
1829 if changeid == b'tip':
1830 node = self.changelog.tip()
1830 node = self.changelog.tip()
1831 rev = self.changelog.rev(node)
1831 rev = self.changelog.rev(node)
1832 return context.changectx(self, rev, node)
1832 return context.changectx(self, rev, node)
1833
1833
1834 # dealing with arbitrary values
1834 # dealing with arbitrary values
1835 try:
1835 try:
1836 if isinstance(changeid, int):
1836 if isinstance(changeid, int):
1837 node = self.changelog.node(changeid)
1837 node = self.changelog.node(changeid)
1838 rev = changeid
1838 rev = changeid
1839 elif changeid == b'.':
1839 elif changeid == b'.':
1840 # this is a hack to delay/avoid loading obsmarkers
1840 # this is a hack to delay/avoid loading obsmarkers
1841 # when we know that '.' won't be hidden
1841 # when we know that '.' won't be hidden
1842 node = self.dirstate.p1()
1842 node = self.dirstate.p1()
1843 rev = self.unfiltered().changelog.rev(node)
1843 rev = self.unfiltered().changelog.rev(node)
1844 elif len(changeid) == 20:
1844 elif len(changeid) == 20:
1845 try:
1845 try:
1846 node = changeid
1846 node = changeid
1847 rev = self.changelog.rev(changeid)
1847 rev = self.changelog.rev(changeid)
1848 except error.FilteredLookupError:
1848 except error.FilteredLookupError:
1849 changeid = hex(changeid) # for the error message
1849 changeid = hex(changeid) # for the error message
1850 raise
1850 raise
1851 except LookupError:
1851 except LookupError:
1852 # check if it might have come from damaged dirstate
1852 # check if it might have come from damaged dirstate
1853 #
1853 #
1854 # XXX we could avoid the unfiltered if we had a recognizable
1854 # XXX we could avoid the unfiltered if we had a recognizable
1855 # exception for filtered changeset access
1855 # exception for filtered changeset access
1856 if (
1856 if (
1857 self.local()
1857 self.local()
1858 and changeid in self.unfiltered().dirstate.parents()
1858 and changeid in self.unfiltered().dirstate.parents()
1859 ):
1859 ):
1860 msg = _(b"working directory has unknown parent '%s'!")
1860 msg = _(b"working directory has unknown parent '%s'!")
1861 raise error.Abort(msg % short(changeid))
1861 raise error.Abort(msg % short(changeid))
1862 changeid = hex(changeid) # for the error message
1862 changeid = hex(changeid) # for the error message
1863 raise
1863 raise
1864
1864
1865 elif len(changeid) == 40:
1865 elif len(changeid) == 40:
1866 node = bin(changeid)
1866 node = bin(changeid)
1867 rev = self.changelog.rev(node)
1867 rev = self.changelog.rev(node)
1868 else:
1868 else:
1869 raise error.ProgrammingError(
1869 raise error.ProgrammingError(
1870 b"unsupported changeid '%s' of type %s"
1870 b"unsupported changeid '%s' of type %s"
1871 % (changeid, pycompat.bytestr(type(changeid)))
1871 % (changeid, pycompat.bytestr(type(changeid)))
1872 )
1872 )
1873
1873
1874 return context.changectx(self, rev, node)
1874 return context.changectx(self, rev, node)
1875
1875
1876 except (error.FilteredIndexError, error.FilteredLookupError):
1876 except (error.FilteredIndexError, error.FilteredLookupError):
1877 raise error.FilteredRepoLookupError(
1877 raise error.FilteredRepoLookupError(
1878 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1878 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1879 )
1879 )
1880 except (IndexError, LookupError):
1880 except (IndexError, LookupError):
1881 raise error.RepoLookupError(
1881 raise error.RepoLookupError(
1882 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1882 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1883 )
1883 )
1884 except error.WdirUnsupported:
1884 except error.WdirUnsupported:
1885 return context.workingctx(self)
1885 return context.workingctx(self)
1886
1886
1887 def __contains__(self, changeid):
1887 def __contains__(self, changeid):
1888 """True if the given changeid exists"""
1888 """True if the given changeid exists"""
1889 try:
1889 try:
1890 self[changeid]
1890 self[changeid]
1891 return True
1891 return True
1892 except error.RepoLookupError:
1892 except error.RepoLookupError:
1893 return False
1893 return False
1894
1894
1895 def __nonzero__(self):
1895 def __nonzero__(self):
1896 return True
1896 return True
1897
1897
1898 __bool__ = __nonzero__
1898 __bool__ = __nonzero__
1899
1899
1900 def __len__(self):
1900 def __len__(self):
1901 # no need to pay the cost of repoview.changelog
1901 # no need to pay the cost of repoview.changelog
1902 unfi = self.unfiltered()
1902 unfi = self.unfiltered()
1903 return len(unfi.changelog)
1903 return len(unfi.changelog)
1904
1904
1905 def __iter__(self):
1905 def __iter__(self):
1906 return iter(self.changelog)
1906 return iter(self.changelog)
1907
1907
1908 def revs(self, expr, *args):
1908 def revs(self, expr, *args):
1909 """Find revisions matching a revset.
1909 """Find revisions matching a revset.
1910
1910
1911 The revset is specified as a string ``expr`` that may contain
1911 The revset is specified as a string ``expr`` that may contain
1912 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1912 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1913
1913
1914 Revset aliases from the configuration are not expanded. To expand
1914 Revset aliases from the configuration are not expanded. To expand
1915 user aliases, consider calling ``scmutil.revrange()`` or
1915 user aliases, consider calling ``scmutil.revrange()`` or
1916 ``repo.anyrevs([expr], user=True)``.
1916 ``repo.anyrevs([expr], user=True)``.
1917
1917
1918 Returns a smartset.abstractsmartset, which is a list-like interface
1918 Returns a smartset.abstractsmartset, which is a list-like interface
1919 that contains integer revisions.
1919 that contains integer revisions.
1920 """
1920 """
1921 tree = revsetlang.spectree(expr, *args)
1921 tree = revsetlang.spectree(expr, *args)
1922 return revset.makematcher(tree)(self)
1922 return revset.makematcher(tree)(self)
1923
1923
1924 def set(self, expr, *args):
1924 def set(self, expr, *args):
1925 """Find revisions matching a revset and emit changectx instances.
1925 """Find revisions matching a revset and emit changectx instances.
1926
1926
1927 This is a convenience wrapper around ``revs()`` that iterates the
1927 This is a convenience wrapper around ``revs()`` that iterates the
1928 result and is a generator of changectx instances.
1928 result and is a generator of changectx instances.
1929
1929
1930 Revset aliases from the configuration are not expanded. To expand
1930 Revset aliases from the configuration are not expanded. To expand
1931 user aliases, consider calling ``scmutil.revrange()``.
1931 user aliases, consider calling ``scmutil.revrange()``.
1932 """
1932 """
1933 for r in self.revs(expr, *args):
1933 for r in self.revs(expr, *args):
1934 yield self[r]
1934 yield self[r]
1935
1935
1936 def anyrevs(self, specs, user=False, localalias=None):
1936 def anyrevs(self, specs, user=False, localalias=None):
1937 """Find revisions matching one of the given revsets.
1937 """Find revisions matching one of the given revsets.
1938
1938
1939 Revset aliases from the configuration are not expanded by default. To
1939 Revset aliases from the configuration are not expanded by default. To
1940 expand user aliases, specify ``user=True``. To provide some local
1940 expand user aliases, specify ``user=True``. To provide some local
1941 definitions overriding user aliases, set ``localalias`` to
1941 definitions overriding user aliases, set ``localalias`` to
1942 ``{name: definitionstring}``.
1942 ``{name: definitionstring}``.
1943 """
1943 """
1944 if specs == [b'null']:
1944 if specs == [b'null']:
1945 return revset.baseset([nullrev])
1945 return revset.baseset([nullrev])
1946 if specs == [b'.']:
1946 if specs == [b'.']:
1947 quick_data = self._quick_access_changeid.get(b'.')
1947 quick_data = self._quick_access_changeid.get(b'.')
1948 if quick_data is not None:
1948 if quick_data is not None:
1949 return revset.baseset([quick_data[0]])
1949 return revset.baseset([quick_data[0]])
1950 if user:
1950 if user:
1951 m = revset.matchany(
1951 m = revset.matchany(
1952 self.ui,
1952 self.ui,
1953 specs,
1953 specs,
1954 lookup=revset.lookupfn(self),
1954 lookup=revset.lookupfn(self),
1955 localalias=localalias,
1955 localalias=localalias,
1956 )
1956 )
1957 else:
1957 else:
1958 m = revset.matchany(None, specs, localalias=localalias)
1958 m = revset.matchany(None, specs, localalias=localalias)
1959 return m(self)
1959 return m(self)
1960
1960
1961 def url(self):
1961 def url(self):
1962 return b'file:' + self.root
1962 return b'file:' + self.root
1963
1963
1964 def hook(self, name, throw=False, **args):
1964 def hook(self, name, throw=False, **args):
1965 """Call a hook, passing this repo instance.
1965 """Call a hook, passing this repo instance.
1966
1966
1967 This a convenience method to aid invoking hooks. Extensions likely
1967 This a convenience method to aid invoking hooks. Extensions likely
1968 won't call this unless they have registered a custom hook or are
1968 won't call this unless they have registered a custom hook or are
1969 replacing code that is expected to call a hook.
1969 replacing code that is expected to call a hook.
1970 """
1970 """
1971 return hook.hook(self.ui, self, name, throw, **args)
1971 return hook.hook(self.ui, self, name, throw, **args)
1972
1972
1973 @filteredpropertycache
1973 @filteredpropertycache
1974 def _tagscache(self):
1974 def _tagscache(self):
1975 """Returns a tagscache object that contains various tags related
1975 """Returns a tagscache object that contains various tags related
1976 caches."""
1976 caches."""
1977
1977
1978 # This simplifies its cache management by having one decorated
1978 # This simplifies its cache management by having one decorated
1979 # function (this one) and the rest simply fetch things from it.
1979 # function (this one) and the rest simply fetch things from it.
1980 class tagscache(object):
1980 class tagscache(object):
1981 def __init__(self):
1981 def __init__(self):
1982 # These two define the set of tags for this repository. tags
1982 # These two define the set of tags for this repository. tags
1983 # maps tag name to node; tagtypes maps tag name to 'global' or
1983 # maps tag name to node; tagtypes maps tag name to 'global' or
1984 # 'local'. (Global tags are defined by .hgtags across all
1984 # 'local'. (Global tags are defined by .hgtags across all
1985 # heads, and local tags are defined in .hg/localtags.)
1985 # heads, and local tags are defined in .hg/localtags.)
1986 # They constitute the in-memory cache of tags.
1986 # They constitute the in-memory cache of tags.
1987 self.tags = self.tagtypes = None
1987 self.tags = self.tagtypes = None
1988
1988
1989 self.nodetagscache = self.tagslist = None
1989 self.nodetagscache = self.tagslist = None
1990
1990
1991 cache = tagscache()
1991 cache = tagscache()
1992 cache.tags, cache.tagtypes = self._findtags()
1992 cache.tags, cache.tagtypes = self._findtags()
1993
1993
1994 return cache
1994 return cache
1995
1995
1996 def tags(self):
1996 def tags(self):
1997 '''return a mapping of tag to node'''
1997 '''return a mapping of tag to node'''
1998 t = {}
1998 t = {}
1999 if self.changelog.filteredrevs:
1999 if self.changelog.filteredrevs:
2000 tags, tt = self._findtags()
2000 tags, tt = self._findtags()
2001 else:
2001 else:
2002 tags = self._tagscache.tags
2002 tags = self._tagscache.tags
2003 rev = self.changelog.rev
2003 rev = self.changelog.rev
2004 for k, v in pycompat.iteritems(tags):
2004 for k, v in pycompat.iteritems(tags):
2005 try:
2005 try:
2006 # ignore tags to unknown nodes
2006 # ignore tags to unknown nodes
2007 rev(v)
2007 rev(v)
2008 t[k] = v
2008 t[k] = v
2009 except (error.LookupError, ValueError):
2009 except (error.LookupError, ValueError):
2010 pass
2010 pass
2011 return t
2011 return t
2012
2012
2013 def _findtags(self):
2013 def _findtags(self):
2014 """Do the hard work of finding tags. Return a pair of dicts
2014 """Do the hard work of finding tags. Return a pair of dicts
2015 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2015 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2016 maps tag name to a string like \'global\' or \'local\'.
2016 maps tag name to a string like \'global\' or \'local\'.
2017 Subclasses or extensions are free to add their own tags, but
2017 Subclasses or extensions are free to add their own tags, but
2018 should be aware that the returned dicts will be retained for the
2018 should be aware that the returned dicts will be retained for the
2019 duration of the localrepo object."""
2019 duration of the localrepo object."""
2020
2020
2021 # XXX what tagtype should subclasses/extensions use? Currently
2021 # XXX what tagtype should subclasses/extensions use? Currently
2022 # mq and bookmarks add tags, but do not set the tagtype at all.
2022 # mq and bookmarks add tags, but do not set the tagtype at all.
2023 # Should each extension invent its own tag type? Should there
2023 # Should each extension invent its own tag type? Should there
2024 # be one tagtype for all such "virtual" tags? Or is the status
2024 # be one tagtype for all such "virtual" tags? Or is the status
2025 # quo fine?
2025 # quo fine?
2026
2026
2027 # map tag name to (node, hist)
2027 # map tag name to (node, hist)
2028 alltags = tagsmod.findglobaltags(self.ui, self)
2028 alltags = tagsmod.findglobaltags(self.ui, self)
2029 # map tag name to tag type
2029 # map tag name to tag type
2030 tagtypes = {tag: b'global' for tag in alltags}
2030 tagtypes = {tag: b'global' for tag in alltags}
2031
2031
2032 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2032 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2033
2033
2034 # Build the return dicts. Have to re-encode tag names because
2034 # Build the return dicts. Have to re-encode tag names because
2035 # the tags module always uses UTF-8 (in order not to lose info
2035 # the tags module always uses UTF-8 (in order not to lose info
2036 # writing to the cache), but the rest of Mercurial wants them in
2036 # writing to the cache), but the rest of Mercurial wants them in
2037 # local encoding.
2037 # local encoding.
2038 tags = {}
2038 tags = {}
2039 for (name, (node, hist)) in pycompat.iteritems(alltags):
2039 for (name, (node, hist)) in pycompat.iteritems(alltags):
2040 if node != nullid:
2040 if node != nullid:
2041 tags[encoding.tolocal(name)] = node
2041 tags[encoding.tolocal(name)] = node
2042 tags[b'tip'] = self.changelog.tip()
2042 tags[b'tip'] = self.changelog.tip()
2043 tagtypes = {
2043 tagtypes = {
2044 encoding.tolocal(name): value
2044 encoding.tolocal(name): value
2045 for (name, value) in pycompat.iteritems(tagtypes)
2045 for (name, value) in pycompat.iteritems(tagtypes)
2046 }
2046 }
2047 return (tags, tagtypes)
2047 return (tags, tagtypes)
2048
2048
2049 def tagtype(self, tagname):
2049 def tagtype(self, tagname):
2050 """
2050 """
2051 return the type of the given tag. result can be:
2051 return the type of the given tag. result can be:
2052
2052
2053 'local' : a local tag
2053 'local' : a local tag
2054 'global' : a global tag
2054 'global' : a global tag
2055 None : tag does not exist
2055 None : tag does not exist
2056 """
2056 """
2057
2057
2058 return self._tagscache.tagtypes.get(tagname)
2058 return self._tagscache.tagtypes.get(tagname)
2059
2059
2060 def tagslist(self):
2060 def tagslist(self):
2061 '''return a list of tags ordered by revision'''
2061 '''return a list of tags ordered by revision'''
2062 if not self._tagscache.tagslist:
2062 if not self._tagscache.tagslist:
2063 l = []
2063 l = []
2064 for t, n in pycompat.iteritems(self.tags()):
2064 for t, n in pycompat.iteritems(self.tags()):
2065 l.append((self.changelog.rev(n), t, n))
2065 l.append((self.changelog.rev(n), t, n))
2066 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2066 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2067
2067
2068 return self._tagscache.tagslist
2068 return self._tagscache.tagslist
2069
2069
2070 def nodetags(self, node):
2070 def nodetags(self, node):
2071 '''return the tags associated with a node'''
2071 '''return the tags associated with a node'''
2072 if not self._tagscache.nodetagscache:
2072 if not self._tagscache.nodetagscache:
2073 nodetagscache = {}
2073 nodetagscache = {}
2074 for t, n in pycompat.iteritems(self._tagscache.tags):
2074 for t, n in pycompat.iteritems(self._tagscache.tags):
2075 nodetagscache.setdefault(n, []).append(t)
2075 nodetagscache.setdefault(n, []).append(t)
2076 for tags in pycompat.itervalues(nodetagscache):
2076 for tags in pycompat.itervalues(nodetagscache):
2077 tags.sort()
2077 tags.sort()
2078 self._tagscache.nodetagscache = nodetagscache
2078 self._tagscache.nodetagscache = nodetagscache
2079 return self._tagscache.nodetagscache.get(node, [])
2079 return self._tagscache.nodetagscache.get(node, [])
2080
2080
2081 def nodebookmarks(self, node):
2081 def nodebookmarks(self, node):
2082 """return the list of bookmarks pointing to the specified node"""
2082 """return the list of bookmarks pointing to the specified node"""
2083 return self._bookmarks.names(node)
2083 return self._bookmarks.names(node)
2084
2084
2085 def branchmap(self):
2085 def branchmap(self):
2086 """returns a dictionary {branch: [branchheads]} with branchheads
2086 """returns a dictionary {branch: [branchheads]} with branchheads
2087 ordered by increasing revision number"""
2087 ordered by increasing revision number"""
2088 return self._branchcaches[self]
2088 return self._branchcaches[self]
2089
2089
2090 @unfilteredmethod
2090 @unfilteredmethod
2091 def revbranchcache(self):
2091 def revbranchcache(self):
2092 if not self._revbranchcache:
2092 if not self._revbranchcache:
2093 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2093 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2094 return self._revbranchcache
2094 return self._revbranchcache
2095
2095
2096 def register_changeset(self, rev, changelogrevision):
2096 def register_changeset(self, rev, changelogrevision):
2097 self.revbranchcache().setdata(rev, changelogrevision)
2097 self.revbranchcache().setdata(rev, changelogrevision)
2098
2098
2099 def branchtip(self, branch, ignoremissing=False):
2099 def branchtip(self, branch, ignoremissing=False):
2100 """return the tip node for a given branch
2100 """return the tip node for a given branch
2101
2101
2102 If ignoremissing is True, then this method will not raise an error.
2102 If ignoremissing is True, then this method will not raise an error.
2103 This is helpful for callers that only expect None for a missing branch
2103 This is helpful for callers that only expect None for a missing branch
2104 (e.g. namespace).
2104 (e.g. namespace).
2105
2105
2106 """
2106 """
2107 try:
2107 try:
2108 return self.branchmap().branchtip(branch)
2108 return self.branchmap().branchtip(branch)
2109 except KeyError:
2109 except KeyError:
2110 if not ignoremissing:
2110 if not ignoremissing:
2111 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2111 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2112 else:
2112 else:
2113 pass
2113 pass
2114
2114
2115 def lookup(self, key):
2115 def lookup(self, key):
2116 node = scmutil.revsymbol(self, key).node()
2116 node = scmutil.revsymbol(self, key).node()
2117 if node is None:
2117 if node is None:
2118 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2118 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2119 return node
2119 return node
2120
2120
2121 def lookupbranch(self, key):
2121 def lookupbranch(self, key):
2122 if self.branchmap().hasbranch(key):
2122 if self.branchmap().hasbranch(key):
2123 return key
2123 return key
2124
2124
2125 return scmutil.revsymbol(self, key).branch()
2125 return scmutil.revsymbol(self, key).branch()
2126
2126
2127 def known(self, nodes):
2127 def known(self, nodes):
2128 cl = self.changelog
2128 cl = self.changelog
2129 get_rev = cl.index.get_rev
2129 get_rev = cl.index.get_rev
2130 filtered = cl.filteredrevs
2130 filtered = cl.filteredrevs
2131 result = []
2131 result = []
2132 for n in nodes:
2132 for n in nodes:
2133 r = get_rev(n)
2133 r = get_rev(n)
2134 resp = not (r is None or r in filtered)
2134 resp = not (r is None or r in filtered)
2135 result.append(resp)
2135 result.append(resp)
2136 return result
2136 return result
2137
2137
2138 def local(self):
2138 def local(self):
2139 return self
2139 return self
2140
2140
2141 def publishing(self):
2141 def publishing(self):
2142 # it's safe (and desirable) to trust the publish flag unconditionally
2142 # it's safe (and desirable) to trust the publish flag unconditionally
2143 # so that we don't finalize changes shared between users via ssh or nfs
2143 # so that we don't finalize changes shared between users via ssh or nfs
2144 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2144 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2145
2145
2146 def cancopy(self):
2146 def cancopy(self):
2147 # so statichttprepo's override of local() works
2147 # so statichttprepo's override of local() works
2148 if not self.local():
2148 if not self.local():
2149 return False
2149 return False
2150 if not self.publishing():
2150 if not self.publishing():
2151 return True
2151 return True
2152 # if publishing we can't copy if there is filtered content
2152 # if publishing we can't copy if there is filtered content
2153 return not self.filtered(b'visible').changelog.filteredrevs
2153 return not self.filtered(b'visible').changelog.filteredrevs
2154
2154
2155 def shared(self):
2155 def shared(self):
2156 '''the type of shared repository (None if not shared)'''
2156 '''the type of shared repository (None if not shared)'''
2157 if self.sharedpath != self.path:
2157 if self.sharedpath != self.path:
2158 return b'store'
2158 return b'store'
2159 return None
2159 return None
2160
2160
2161 def wjoin(self, f, *insidef):
2161 def wjoin(self, f, *insidef):
2162 return self.vfs.reljoin(self.root, f, *insidef)
2162 return self.vfs.reljoin(self.root, f, *insidef)
2163
2163
2164 def setparents(self, p1, p2=nullid):
2164 def setparents(self, p1, p2=nullid):
2165 self[None].setparents(p1, p2)
2165 self[None].setparents(p1, p2)
2166 self._quick_access_changeid_invalidate()
2166 self._quick_access_changeid_invalidate()
2167
2167
2168 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2168 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2169 """changeid must be a changeset revision, if specified.
2169 """changeid must be a changeset revision, if specified.
2170 fileid can be a file revision or node."""
2170 fileid can be a file revision or node."""
2171 return context.filectx(
2171 return context.filectx(
2172 self, path, changeid, fileid, changectx=changectx
2172 self, path, changeid, fileid, changectx=changectx
2173 )
2173 )
2174
2174
2175 def getcwd(self):
2175 def getcwd(self):
2176 return self.dirstate.getcwd()
2176 return self.dirstate.getcwd()
2177
2177
2178 def pathto(self, f, cwd=None):
2178 def pathto(self, f, cwd=None):
2179 return self.dirstate.pathto(f, cwd)
2179 return self.dirstate.pathto(f, cwd)
2180
2180
    def _loadfilter(self, filter):
        """Load and memoize the filter entries configured in the config
        section named by *filter* (e.g. b'encode' or b'decode').

        Return a list of (matcher, filterfn, params) triples; the list
        is cached in self._filterpats keyed by the section name.
        """
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    # b'!' explicitly disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                # a command starting with a registered data-filter name
                # selects that in-process filter; the rest of the command
                # string becomes its parameters
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    # no in-process filter matched: pipe the data through
                    # the command as an external shell filter
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
2206
2206
2207 def _filter(self, filterpats, filename, data):
2207 def _filter(self, filterpats, filename, data):
2208 for mf, fn, cmd in filterpats:
2208 for mf, fn, cmd in filterpats:
2209 if mf(filename):
2209 if mf(filename):
2210 self.ui.debug(
2210 self.ui.debug(
2211 b"filtering %s through %s\n"
2211 b"filtering %s through %s\n"
2212 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2212 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2213 )
2213 )
2214 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2214 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2215 break
2215 break
2216
2216
2217 return data
2217 return data
2218
2218
2219 @unfilteredpropertycache
2219 @unfilteredpropertycache
2220 def _encodefilterpats(self):
2220 def _encodefilterpats(self):
2221 return self._loadfilter(b'encode')
2221 return self._loadfilter(b'encode')
2222
2222
2223 @unfilteredpropertycache
2223 @unfilteredpropertycache
2224 def _decodefilterpats(self):
2224 def _decodefilterpats(self):
2225 return self._loadfilter(b'decode')
2225 return self._loadfilter(b'decode')
2226
2226
2227 def adddatafilter(self, name, filter):
2227 def adddatafilter(self, name, filter):
2228 self._datafilters[name] = filter
2228 self._datafilters[name] = filter
2229
2229
2230 def wread(self, filename):
2230 def wread(self, filename):
2231 if self.wvfs.islink(filename):
2231 if self.wvfs.islink(filename):
2232 data = self.wvfs.readlink(filename)
2232 data = self.wvfs.readlink(filename)
2233 else:
2233 else:
2234 data = self.wvfs.read(filename)
2234 data = self.wvfs.read(filename)
2235 return self._filter(self._encodefilterpats, filename, data)
2235 return self._filter(self._encodefilterpats, filename, data)
2236
2236
2237 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2237 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2238 """write ``data`` into ``filename`` in the working directory
2238 """write ``data`` into ``filename`` in the working directory
2239
2239
2240 This returns length of written (maybe decoded) data.
2240 This returns length of written (maybe decoded) data.
2241 """
2241 """
2242 data = self._filter(self._decodefilterpats, filename, data)
2242 data = self._filter(self._decodefilterpats, filename, data)
2243 if b'l' in flags:
2243 if b'l' in flags:
2244 self.wvfs.symlink(data, filename)
2244 self.wvfs.symlink(data, filename)
2245 else:
2245 else:
2246 self.wvfs.write(
2246 self.wvfs.write(
2247 filename, data, backgroundclose=backgroundclose, **kwargs
2247 filename, data, backgroundclose=backgroundclose, **kwargs
2248 )
2248 )
2249 if b'x' in flags:
2249 if b'x' in flags:
2250 self.wvfs.setflags(filename, False, True)
2250 self.wvfs.setflags(filename, False, True)
2251 else:
2251 else:
2252 self.wvfs.setflags(filename, False, False)
2252 self.wvfs.setflags(filename, False, False)
2253 return len(data)
2253 return len(data)
2254
2254
2255 def wwritedata(self, filename, data):
2255 def wwritedata(self, filename, data):
2256 return self._filter(self._decodefilterpats, filename, data)
2256 return self._filter(self._decodefilterpats, filename, data)
2257
2257
2258 def currenttransaction(self):
2258 def currenttransaction(self):
2259 """return the current transaction or None if non exists"""
2259 """return the current transaction or None if non exists"""
2260 if self._transref:
2260 if self._transref:
2261 tr = self._transref()
2261 tr = self._transref()
2262 else:
2262 else:
2263 tr = None
2263 tr = None
2264
2264
2265 if tr and tr.running():
2265 if tr and tr.running():
2266 return tr
2266 return tr
2267 return None
2267 return None
2268
2268
2269 def transaction(self, desc, report=None):
2269 def transaction(self, desc, report=None):
2270 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2270 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2271 b'devel', b'check-locks'
2271 b'devel', b'check-locks'
2272 ):
2272 ):
2273 if self._currentlock(self._lockref) is None:
2273 if self._currentlock(self._lockref) is None:
2274 raise error.ProgrammingError(b'transaction requires locking')
2274 raise error.ProgrammingError(b'transaction requires locking')
2275 tr = self.currenttransaction()
2275 tr = self.currenttransaction()
2276 if tr is not None:
2276 if tr is not None:
2277 return tr.nest(name=desc)
2277 return tr.nest(name=desc)
2278
2278
2279 # abort here if the journal already exists
2279 # abort here if the journal already exists
2280 if self.svfs.exists(b"journal"):
2280 if self.svfs.exists(b"journal"):
2281 raise error.RepoError(
2281 raise error.RepoError(
2282 _(b"abandoned transaction found"),
2282 _(b"abandoned transaction found"),
2283 hint=_(b"run 'hg recover' to clean up transaction"),
2283 hint=_(b"run 'hg recover' to clean up transaction"),
2284 )
2284 )
2285
2285
2286 idbase = b"%.40f#%f" % (random.random(), time.time())
2286 idbase = b"%.40f#%f" % (random.random(), time.time())
2287 ha = hex(hashutil.sha1(idbase).digest())
2287 ha = hex(hashutil.sha1(idbase).digest())
2288 txnid = b'TXN:' + ha
2288 txnid = b'TXN:' + ha
2289 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2289 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2290
2290
2291 self._writejournal(desc)
2291 self._writejournal(desc)
2292 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2292 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2293 if report:
2293 if report:
2294 rp = report
2294 rp = report
2295 else:
2295 else:
2296 rp = self.ui.warn
2296 rp = self.ui.warn
2297 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2297 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2298 # we must avoid cyclic reference between repo and transaction.
2298 # we must avoid cyclic reference between repo and transaction.
2299 reporef = weakref.ref(self)
2299 reporef = weakref.ref(self)
2300 # Code to track tag movement
2300 # Code to track tag movement
2301 #
2301 #
2302 # Since tags are all handled as file content, it is actually quite hard
2302 # Since tags are all handled as file content, it is actually quite hard
2303 # to track these movement from a code perspective. So we fallback to a
2303 # to track these movement from a code perspective. So we fallback to a
2304 # tracking at the repository level. One could envision to track changes
2304 # tracking at the repository level. One could envision to track changes
2305 # to the '.hgtags' file through changegroup apply but that fails to
2305 # to the '.hgtags' file through changegroup apply but that fails to
2306 # cope with case where transaction expose new heads without changegroup
2306 # cope with case where transaction expose new heads without changegroup
2307 # being involved (eg: phase movement).
2307 # being involved (eg: phase movement).
2308 #
2308 #
2309 # For now, We gate the feature behind a flag since this likely comes
2309 # For now, We gate the feature behind a flag since this likely comes
2310 # with performance impacts. The current code run more often than needed
2310 # with performance impacts. The current code run more often than needed
2311 # and do not use caches as much as it could. The current focus is on
2311 # and do not use caches as much as it could. The current focus is on
2312 # the behavior of the feature so we disable it by default. The flag
2312 # the behavior of the feature so we disable it by default. The flag
2313 # will be removed when we are happy with the performance impact.
2313 # will be removed when we are happy with the performance impact.
2314 #
2314 #
2315 # Once this feature is no longer experimental move the following
2315 # Once this feature is no longer experimental move the following
2316 # documentation to the appropriate help section:
2316 # documentation to the appropriate help section:
2317 #
2317 #
2318 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2318 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2319 # tags (new or changed or deleted tags). In addition the details of
2319 # tags (new or changed or deleted tags). In addition the details of
2320 # these changes are made available in a file at:
2320 # these changes are made available in a file at:
2321 # ``REPOROOT/.hg/changes/tags.changes``.
2321 # ``REPOROOT/.hg/changes/tags.changes``.
2322 # Make sure you check for HG_TAG_MOVED before reading that file as it
2322 # Make sure you check for HG_TAG_MOVED before reading that file as it
2323 # might exist from a previous transaction even if no tag were touched
2323 # might exist from a previous transaction even if no tag were touched
2324 # in this one. Changes are recorded in a line base format::
2324 # in this one. Changes are recorded in a line base format::
2325 #
2325 #
2326 # <action> <hex-node> <tag-name>\n
2326 # <action> <hex-node> <tag-name>\n
2327 #
2327 #
2328 # Actions are defined as follow:
2328 # Actions are defined as follow:
2329 # "-R": tag is removed,
2329 # "-R": tag is removed,
2330 # "+A": tag is added,
2330 # "+A": tag is added,
2331 # "-M": tag is moved (old value),
2331 # "-M": tag is moved (old value),
2332 # "+M": tag is moved (new value),
2332 # "+M": tag is moved (new value),
2333 tracktags = lambda x: None
2333 tracktags = lambda x: None
2334 # experimental config: experimental.hook-track-tags
2334 # experimental config: experimental.hook-track-tags
2335 shouldtracktags = self.ui.configbool(
2335 shouldtracktags = self.ui.configbool(
2336 b'experimental', b'hook-track-tags'
2336 b'experimental', b'hook-track-tags'
2337 )
2337 )
2338 if desc != b'strip' and shouldtracktags:
2338 if desc != b'strip' and shouldtracktags:
2339 oldheads = self.changelog.headrevs()
2339 oldheads = self.changelog.headrevs()
2340
2340
2341 def tracktags(tr2):
2341 def tracktags(tr2):
2342 repo = reporef()
2342 repo = reporef()
2343 assert repo is not None # help pytype
2343 assert repo is not None # help pytype
2344 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2344 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2345 newheads = repo.changelog.headrevs()
2345 newheads = repo.changelog.headrevs()
2346 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2346 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2347 # notes: we compare lists here.
2347 # notes: we compare lists here.
2348 # As we do it only once buiding set would not be cheaper
2348 # As we do it only once buiding set would not be cheaper
2349 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2349 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2350 if changes:
2350 if changes:
2351 tr2.hookargs[b'tag_moved'] = b'1'
2351 tr2.hookargs[b'tag_moved'] = b'1'
2352 with repo.vfs(
2352 with repo.vfs(
2353 b'changes/tags.changes', b'w', atomictemp=True
2353 b'changes/tags.changes', b'w', atomictemp=True
2354 ) as changesfile:
2354 ) as changesfile:
2355 # note: we do not register the file to the transaction
2355 # note: we do not register the file to the transaction
2356 # because we needs it to still exist on the transaction
2356 # because we needs it to still exist on the transaction
2357 # is close (for txnclose hooks)
2357 # is close (for txnclose hooks)
2358 tagsmod.writediff(changesfile, changes)
2358 tagsmod.writediff(changesfile, changes)
2359
2359
2360 def validate(tr2):
2360 def validate(tr2):
2361 """will run pre-closing hooks"""
2361 """will run pre-closing hooks"""
2362 # XXX the transaction API is a bit lacking here so we take a hacky
2362 # XXX the transaction API is a bit lacking here so we take a hacky
2363 # path for now
2363 # path for now
2364 #
2364 #
2365 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2365 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2366 # dict is copied before these run. In addition we needs the data
2366 # dict is copied before these run. In addition we needs the data
2367 # available to in memory hooks too.
2367 # available to in memory hooks too.
2368 #
2368 #
2369 # Moreover, we also need to make sure this runs before txnclose
2369 # Moreover, we also need to make sure this runs before txnclose
2370 # hooks and there is no "pending" mechanism that would execute
2370 # hooks and there is no "pending" mechanism that would execute
2371 # logic only if hooks are about to run.
2371 # logic only if hooks are about to run.
2372 #
2372 #
2373 # Fixing this limitation of the transaction is also needed to track
2373 # Fixing this limitation of the transaction is also needed to track
2374 # other families of changes (bookmarks, phases, obsolescence).
2374 # other families of changes (bookmarks, phases, obsolescence).
2375 #
2375 #
2376 # This will have to be fixed before we remove the experimental
2376 # This will have to be fixed before we remove the experimental
2377 # gating.
2377 # gating.
2378 tracktags(tr2)
2378 tracktags(tr2)
2379 repo = reporef()
2379 repo = reporef()
2380 assert repo is not None # help pytype
2380 assert repo is not None # help pytype
2381
2381
2382 singleheadopt = (b'experimental', b'single-head-per-branch')
2382 singleheadopt = (b'experimental', b'single-head-per-branch')
2383 singlehead = repo.ui.configbool(*singleheadopt)
2383 singlehead = repo.ui.configbool(*singleheadopt)
2384 if singlehead:
2384 if singlehead:
2385 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2385 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2386 accountclosed = singleheadsub.get(
2386 accountclosed = singleheadsub.get(
2387 b"account-closed-heads", False
2387 b"account-closed-heads", False
2388 )
2388 )
2389 if singleheadsub.get(b"public-changes-only", False):
2389 if singleheadsub.get(b"public-changes-only", False):
2390 filtername = b"immutable"
2390 filtername = b"immutable"
2391 else:
2391 else:
2392 filtername = b"visible"
2392 filtername = b"visible"
2393 scmutil.enforcesinglehead(
2393 scmutil.enforcesinglehead(
2394 repo, tr2, desc, accountclosed, filtername
2394 repo, tr2, desc, accountclosed, filtername
2395 )
2395 )
2396 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2396 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2397 for name, (old, new) in sorted(
2397 for name, (old, new) in sorted(
2398 tr.changes[b'bookmarks'].items()
2398 tr.changes[b'bookmarks'].items()
2399 ):
2399 ):
2400 args = tr.hookargs.copy()
2400 args = tr.hookargs.copy()
2401 args.update(bookmarks.preparehookargs(name, old, new))
2401 args.update(bookmarks.preparehookargs(name, old, new))
2402 repo.hook(
2402 repo.hook(
2403 b'pretxnclose-bookmark',
2403 b'pretxnclose-bookmark',
2404 throw=True,
2404 throw=True,
2405 **pycompat.strkwargs(args)
2405 **pycompat.strkwargs(args)
2406 )
2406 )
2407 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2407 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2408 cl = repo.unfiltered().changelog
2408 cl = repo.unfiltered().changelog
2409 for revs, (old, new) in tr.changes[b'phases']:
2409 for revs, (old, new) in tr.changes[b'phases']:
2410 for rev in revs:
2410 for rev in revs:
2411 args = tr.hookargs.copy()
2411 args = tr.hookargs.copy()
2412 node = hex(cl.node(rev))
2412 node = hex(cl.node(rev))
2413 args.update(phases.preparehookargs(node, old, new))
2413 args.update(phases.preparehookargs(node, old, new))
2414 repo.hook(
2414 repo.hook(
2415 b'pretxnclose-phase',
2415 b'pretxnclose-phase',
2416 throw=True,
2416 throw=True,
2417 **pycompat.strkwargs(args)
2417 **pycompat.strkwargs(args)
2418 )
2418 )
2419
2419
2420 repo.hook(
2420 repo.hook(
2421 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2421 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2422 )
2422 )
2423
2423
2424 def releasefn(tr, success):
2424 def releasefn(tr, success):
2425 repo = reporef()
2425 repo = reporef()
2426 if repo is None:
2426 if repo is None:
2427 # If the repo has been GC'd (and this release function is being
2427 # If the repo has been GC'd (and this release function is being
2428 # called from transaction.__del__), there's not much we can do,
2428 # called from transaction.__del__), there's not much we can do,
2429 # so just leave the unfinished transaction there and let the
2429 # so just leave the unfinished transaction there and let the
2430 # user run `hg recover`.
2430 # user run `hg recover`.
2431 return
2431 return
2432 if success:
2432 if success:
2433 # this should be explicitly invoked here, because
2433 # this should be explicitly invoked here, because
2434 # in-memory changes aren't written out at closing
2434 # in-memory changes aren't written out at closing
2435 # transaction, if tr.addfilegenerator (via
2435 # transaction, if tr.addfilegenerator (via
2436 # dirstate.write or so) isn't invoked while
2436 # dirstate.write or so) isn't invoked while
2437 # transaction running
2437 # transaction running
2438 repo.dirstate.write(None)
2438 repo.dirstate.write(None)
2439 else:
2439 else:
2440 # discard all changes (including ones already written
2440 # discard all changes (including ones already written
2441 # out) in this transaction
2441 # out) in this transaction
2442 narrowspec.restorebackup(self, b'journal.narrowspec')
2442 narrowspec.restorebackup(self, b'journal.narrowspec')
2443 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2443 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2444 repo.dirstate.restorebackup(None, b'journal.dirstate')
2444 repo.dirstate.restorebackup(None, b'journal.dirstate')
2445
2445
2446 repo.invalidate(clearfilecache=True)
2446 repo.invalidate(clearfilecache=True)
2447
2447
2448 tr = transaction.transaction(
2448 tr = transaction.transaction(
2449 rp,
2449 rp,
2450 self.svfs,
2450 self.svfs,
2451 vfsmap,
2451 vfsmap,
2452 b"journal",
2452 b"journal",
2453 b"undo",
2453 b"undo",
2454 aftertrans(renames),
2454 aftertrans(renames),
2455 self.store.createmode,
2455 self.store.createmode,
2456 validator=validate,
2456 validator=validate,
2457 releasefn=releasefn,
2457 releasefn=releasefn,
2458 checkambigfiles=_cachedfiles,
2458 checkambigfiles=_cachedfiles,
2459 name=desc,
2459 name=desc,
2460 )
2460 )
2461 tr.changes[b'origrepolen'] = len(self)
2461 tr.changes[b'origrepolen'] = len(self)
2462 tr.changes[b'obsmarkers'] = set()
2462 tr.changes[b'obsmarkers'] = set()
2463 tr.changes[b'phases'] = []
2463 tr.changes[b'phases'] = []
2464 tr.changes[b'bookmarks'] = {}
2464 tr.changes[b'bookmarks'] = {}
2465
2465
2466 tr.hookargs[b'txnid'] = txnid
2466 tr.hookargs[b'txnid'] = txnid
2467 tr.hookargs[b'txnname'] = desc
2467 tr.hookargs[b'txnname'] = desc
2468 tr.hookargs[b'changes'] = tr.changes
2468 tr.hookargs[b'changes'] = tr.changes
2469 # note: writing the fncache only during finalize mean that the file is
2469 # note: writing the fncache only during finalize mean that the file is
2470 # outdated when running hooks. As fncache is used for streaming clone,
2470 # outdated when running hooks. As fncache is used for streaming clone,
2471 # this is not expected to break anything that happen during the hooks.
2471 # this is not expected to break anything that happen during the hooks.
2472 tr.addfinalize(b'flush-fncache', self.store.write)
2472 tr.addfinalize(b'flush-fncache', self.store.write)
2473
2473
2474 def txnclosehook(tr2):
2474 def txnclosehook(tr2):
2475 """To be run if transaction is successful, will schedule a hook run"""
2475 """To be run if transaction is successful, will schedule a hook run"""
2476 # Don't reference tr2 in hook() so we don't hold a reference.
2476 # Don't reference tr2 in hook() so we don't hold a reference.
2477 # This reduces memory consumption when there are multiple
2477 # This reduces memory consumption when there are multiple
2478 # transactions per lock. This can likely go away if issue5045
2478 # transactions per lock. This can likely go away if issue5045
2479 # fixes the function accumulation.
2479 # fixes the function accumulation.
2480 hookargs = tr2.hookargs
2480 hookargs = tr2.hookargs
2481
2481
2482 def hookfunc(unused_success):
2482 def hookfunc(unused_success):
2483 repo = reporef()
2483 repo = reporef()
2484 assert repo is not None # help pytype
2484 assert repo is not None # help pytype
2485
2485
2486 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2486 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2487 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2487 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2488 for name, (old, new) in bmchanges:
2488 for name, (old, new) in bmchanges:
2489 args = tr.hookargs.copy()
2489 args = tr.hookargs.copy()
2490 args.update(bookmarks.preparehookargs(name, old, new))
2490 args.update(bookmarks.preparehookargs(name, old, new))
2491 repo.hook(
2491 repo.hook(
2492 b'txnclose-bookmark',
2492 b'txnclose-bookmark',
2493 throw=False,
2493 throw=False,
2494 **pycompat.strkwargs(args)
2494 **pycompat.strkwargs(args)
2495 )
2495 )
2496
2496
2497 if hook.hashook(repo.ui, b'txnclose-phase'):
2497 if hook.hashook(repo.ui, b'txnclose-phase'):
2498 cl = repo.unfiltered().changelog
2498 cl = repo.unfiltered().changelog
2499 phasemv = sorted(
2499 phasemv = sorted(
2500 tr.changes[b'phases'], key=lambda r: r[0][0]
2500 tr.changes[b'phases'], key=lambda r: r[0][0]
2501 )
2501 )
2502 for revs, (old, new) in phasemv:
2502 for revs, (old, new) in phasemv:
2503 for rev in revs:
2503 for rev in revs:
2504 args = tr.hookargs.copy()
2504 args = tr.hookargs.copy()
2505 node = hex(cl.node(rev))
2505 node = hex(cl.node(rev))
2506 args.update(phases.preparehookargs(node, old, new))
2506 args.update(phases.preparehookargs(node, old, new))
2507 repo.hook(
2507 repo.hook(
2508 b'txnclose-phase',
2508 b'txnclose-phase',
2509 throw=False,
2509 throw=False,
2510 **pycompat.strkwargs(args)
2510 **pycompat.strkwargs(args)
2511 )
2511 )
2512
2512
2513 repo.hook(
2513 repo.hook(
2514 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2514 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2515 )
2515 )
2516
2516
2517 repo = reporef()
2517 repo = reporef()
2518 assert repo is not None # help pytype
2518 assert repo is not None # help pytype
2519 repo._afterlock(hookfunc)
2519 repo._afterlock(hookfunc)
2520
2520
2521 tr.addfinalize(b'txnclose-hook', txnclosehook)
2521 tr.addfinalize(b'txnclose-hook', txnclosehook)
2522 # Include a leading "-" to make it happen before the transaction summary
2522 # Include a leading "-" to make it happen before the transaction summary
2523 # reports registered via scmutil.registersummarycallback() whose names
2523 # reports registered via scmutil.registersummarycallback() whose names
2524 # are 00-txnreport etc. That way, the caches will be warm when the
2524 # are 00-txnreport etc. That way, the caches will be warm when the
2525 # callbacks run.
2525 # callbacks run.
2526 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2526 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2527
2527
2528 def txnaborthook(tr2):
2528 def txnaborthook(tr2):
2529 """To be run if transaction is aborted"""
2529 """To be run if transaction is aborted"""
2530 repo = reporef()
2530 repo = reporef()
2531 assert repo is not None # help pytype
2531 assert repo is not None # help pytype
2532 repo.hook(
2532 repo.hook(
2533 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2533 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2534 )
2534 )
2535
2535
2536 tr.addabort(b'txnabort-hook', txnaborthook)
2536 tr.addabort(b'txnabort-hook', txnaborthook)
2537 # avoid eager cache invalidation. in-memory data should be identical
2537 # avoid eager cache invalidation. in-memory data should be identical
2538 # to stored data if transaction has no error.
2538 # to stored data if transaction has no error.
2539 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2539 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2540 self._transref = weakref.ref(tr)
2540 self._transref = weakref.ref(tr)
2541 scmutil.registersummarycallback(self, tr, desc)
2541 scmutil.registersummarycallback(self, tr, desc)
2542 return tr
2542 return tr
2543
2543
2544 def _journalfiles(self):
2544 def _journalfiles(self):
2545 return (
2545 return (
2546 (self.svfs, b'journal'),
2546 (self.svfs, b'journal'),
2547 (self.svfs, b'journal.narrowspec'),
2547 (self.svfs, b'journal.narrowspec'),
2548 (self.vfs, b'journal.narrowspec.dirstate'),
2548 (self.vfs, b'journal.narrowspec.dirstate'),
2549 (self.vfs, b'journal.dirstate'),
2549 (self.vfs, b'journal.dirstate'),
2550 (self.vfs, b'journal.branch'),
2550 (self.vfs, b'journal.branch'),
2551 (self.vfs, b'journal.desc'),
2551 (self.vfs, b'journal.desc'),
2552 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2552 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2553 (self.svfs, b'journal.phaseroots'),
2553 (self.svfs, b'journal.phaseroots'),
2554 )
2554 )
2555
2555
2556 def undofiles(self):
2556 def undofiles(self):
2557 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2557 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2558
2558
2559 @unfilteredmethod
2559 @unfilteredmethod
2560 def _writejournal(self, desc):
2560 def _writejournal(self, desc):
2561 self.dirstate.savebackup(None, b'journal.dirstate')
2561 self.dirstate.savebackup(None, b'journal.dirstate')
2562 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2562 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2563 narrowspec.savebackup(self, b'journal.narrowspec')
2563 narrowspec.savebackup(self, b'journal.narrowspec')
2564 self.vfs.write(
2564 self.vfs.write(
2565 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2565 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2566 )
2566 )
2567 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2567 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2568 bookmarksvfs = bookmarks.bookmarksvfs(self)
2568 bookmarksvfs = bookmarks.bookmarksvfs(self)
2569 bookmarksvfs.write(
2569 bookmarksvfs.write(
2570 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2570 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2571 )
2571 )
2572 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2572 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2573
2573
2574 def recover(self):
2574 def recover(self):
2575 with self.lock():
2575 with self.lock():
2576 if self.svfs.exists(b"journal"):
2576 if self.svfs.exists(b"journal"):
2577 self.ui.status(_(b"rolling back interrupted transaction\n"))
2577 self.ui.status(_(b"rolling back interrupted transaction\n"))
2578 vfsmap = {
2578 vfsmap = {
2579 b'': self.svfs,
2579 b'': self.svfs,
2580 b'plain': self.vfs,
2580 b'plain': self.vfs,
2581 }
2581 }
2582 transaction.rollback(
2582 transaction.rollback(
2583 self.svfs,
2583 self.svfs,
2584 vfsmap,
2584 vfsmap,
2585 b"journal",
2585 b"journal",
2586 self.ui.warn,
2586 self.ui.warn,
2587 checkambigfiles=_cachedfiles,
2587 checkambigfiles=_cachedfiles,
2588 )
2588 )
2589 self.invalidate()
2589 self.invalidate()
2590 return True
2590 return True
2591 else:
2591 else:
2592 self.ui.warn(_(b"no interrupted transaction available\n"))
2592 self.ui.warn(_(b"no interrupted transaction available\n"))
2593 return False
2593 return False
2594
2594
2595 def rollback(self, dryrun=False, force=False):
2595 def rollback(self, dryrun=False, force=False):
2596 wlock = lock = dsguard = None
2596 wlock = lock = dsguard = None
2597 try:
2597 try:
2598 wlock = self.wlock()
2598 wlock = self.wlock()
2599 lock = self.lock()
2599 lock = self.lock()
2600 if self.svfs.exists(b"undo"):
2600 if self.svfs.exists(b"undo"):
2601 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2601 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2602
2602
2603 return self._rollback(dryrun, force, dsguard)
2603 return self._rollback(dryrun, force, dsguard)
2604 else:
2604 else:
2605 self.ui.warn(_(b"no rollback information available\n"))
2605 self.ui.warn(_(b"no rollback information available\n"))
2606 return 1
2606 return 1
2607 finally:
2607 finally:
2608 release(dsguard, lock, wlock)
2608 release(dsguard, lock, wlock)
2609
2609
2610 @unfilteredmethod # Until we get smarter cache management
2610 @unfilteredmethod # Until we get smarter cache management
2611 def _rollback(self, dryrun, force, dsguard):
2611 def _rollback(self, dryrun, force, dsguard):
2612 ui = self.ui
2612 ui = self.ui
2613 try:
2613 try:
2614 args = self.vfs.read(b'undo.desc').splitlines()
2614 args = self.vfs.read(b'undo.desc').splitlines()
2615 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2615 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2616 if len(args) >= 3:
2616 if len(args) >= 3:
2617 detail = args[2]
2617 detail = args[2]
2618 oldtip = oldlen - 1
2618 oldtip = oldlen - 1
2619
2619
2620 if detail and ui.verbose:
2620 if detail and ui.verbose:
2621 msg = _(
2621 msg = _(
2622 b'repository tip rolled back to revision %d'
2622 b'repository tip rolled back to revision %d'
2623 b' (undo %s: %s)\n'
2623 b' (undo %s: %s)\n'
2624 ) % (oldtip, desc, detail)
2624 ) % (oldtip, desc, detail)
2625 else:
2625 else:
2626 msg = _(
2626 msg = _(
2627 b'repository tip rolled back to revision %d (undo %s)\n'
2627 b'repository tip rolled back to revision %d (undo %s)\n'
2628 ) % (oldtip, desc)
2628 ) % (oldtip, desc)
2629 except IOError:
2629 except IOError:
2630 msg = _(b'rolling back unknown transaction\n')
2630 msg = _(b'rolling back unknown transaction\n')
2631 desc = None
2631 desc = None
2632
2632
2633 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2633 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2634 raise error.Abort(
2634 raise error.Abort(
2635 _(
2635 _(
2636 b'rollback of last commit while not checked out '
2636 b'rollback of last commit while not checked out '
2637 b'may lose data'
2637 b'may lose data'
2638 ),
2638 ),
2639 hint=_(b'use -f to force'),
2639 hint=_(b'use -f to force'),
2640 )
2640 )
2641
2641
2642 ui.status(msg)
2642 ui.status(msg)
2643 if dryrun:
2643 if dryrun:
2644 return 0
2644 return 0
2645
2645
2646 parents = self.dirstate.parents()
2646 parents = self.dirstate.parents()
2647 self.destroying()
2647 self.destroying()
2648 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2648 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2649 transaction.rollback(
2649 transaction.rollback(
2650 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2650 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2651 )
2651 )
2652 bookmarksvfs = bookmarks.bookmarksvfs(self)
2652 bookmarksvfs = bookmarks.bookmarksvfs(self)
2653 if bookmarksvfs.exists(b'undo.bookmarks'):
2653 if bookmarksvfs.exists(b'undo.bookmarks'):
2654 bookmarksvfs.rename(
2654 bookmarksvfs.rename(
2655 b'undo.bookmarks', b'bookmarks', checkambig=True
2655 b'undo.bookmarks', b'bookmarks', checkambig=True
2656 )
2656 )
2657 if self.svfs.exists(b'undo.phaseroots'):
2657 if self.svfs.exists(b'undo.phaseroots'):
2658 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2658 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2659 self.invalidate()
2659 self.invalidate()
2660
2660
2661 has_node = self.changelog.index.has_node
2661 has_node = self.changelog.index.has_node
2662 parentgone = any(not has_node(p) for p in parents)
2662 parentgone = any(not has_node(p) for p in parents)
2663 if parentgone:
2663 if parentgone:
2664 # prevent dirstateguard from overwriting already restored one
2664 # prevent dirstateguard from overwriting already restored one
2665 dsguard.close()
2665 dsguard.close()
2666
2666
2667 narrowspec.restorebackup(self, b'undo.narrowspec')
2667 narrowspec.restorebackup(self, b'undo.narrowspec')
2668 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2668 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2669 self.dirstate.restorebackup(None, b'undo.dirstate')
2669 self.dirstate.restorebackup(None, b'undo.dirstate')
2670 try:
2670 try:
2671 branch = self.vfs.read(b'undo.branch')
2671 branch = self.vfs.read(b'undo.branch')
2672 self.dirstate.setbranch(encoding.tolocal(branch))
2672 self.dirstate.setbranch(encoding.tolocal(branch))
2673 except IOError:
2673 except IOError:
2674 ui.warn(
2674 ui.warn(
2675 _(
2675 _(
2676 b'named branch could not be reset: '
2676 b'named branch could not be reset: '
2677 b'current branch is still \'%s\'\n'
2677 b'current branch is still \'%s\'\n'
2678 )
2678 )
2679 % self.dirstate.branch()
2679 % self.dirstate.branch()
2680 )
2680 )
2681
2681
2682 parents = tuple([p.rev() for p in self[None].parents()])
2682 parents = tuple([p.rev() for p in self[None].parents()])
2683 if len(parents) > 1:
2683 if len(parents) > 1:
2684 ui.status(
2684 ui.status(
2685 _(
2685 _(
2686 b'working directory now based on '
2686 b'working directory now based on '
2687 b'revisions %d and %d\n'
2687 b'revisions %d and %d\n'
2688 )
2688 )
2689 % parents
2689 % parents
2690 )
2690 )
2691 else:
2691 else:
2692 ui.status(
2692 ui.status(
2693 _(b'working directory now based on revision %d\n') % parents
2693 _(b'working directory now based on revision %d\n') % parents
2694 )
2694 )
2695 mergestatemod.mergestate.clean(self)
2695 mergestatemod.mergestate.clean(self)
2696
2696
2697 # TODO: if we know which new heads may result from this rollback, pass
2697 # TODO: if we know which new heads may result from this rollback, pass
2698 # them to destroy(), which will prevent the branchhead cache from being
2698 # them to destroy(), which will prevent the branchhead cache from being
2699 # invalidated.
2699 # invalidated.
2700 self.destroyed()
2700 self.destroyed()
2701 return 0
2701 return 0
2702
2702
2703 def _buildcacheupdater(self, newtransaction):
2703 def _buildcacheupdater(self, newtransaction):
2704 """called during transaction to build the callback updating cache
2704 """called during transaction to build the callback updating cache
2705
2705
2706 Lives on the repository to help extension who might want to augment
2706 Lives on the repository to help extension who might want to augment
2707 this logic. For this purpose, the created transaction is passed to the
2707 this logic. For this purpose, the created transaction is passed to the
2708 method.
2708 method.
2709 """
2709 """
2710 # we must avoid cyclic reference between repo and transaction.
2710 # we must avoid cyclic reference between repo and transaction.
2711 reporef = weakref.ref(self)
2711 reporef = weakref.ref(self)
2712
2712
2713 def updater(tr):
2713 def updater(tr):
2714 repo = reporef()
2714 repo = reporef()
2715 assert repo is not None # help pytype
2715 assert repo is not None # help pytype
2716 repo.updatecaches(tr)
2716 repo.updatecaches(tr)
2717
2717
2718 return updater
2718 return updater
2719
2719
2720 @unfilteredmethod
2720 @unfilteredmethod
2721 def updatecaches(self, tr=None, full=False):
2721 def updatecaches(self, tr=None, full=False):
2722 """warm appropriate caches
2722 """warm appropriate caches
2723
2723
2724 If this function is called after a transaction closed. The transaction
2724 If this function is called after a transaction closed. The transaction
2725 will be available in the 'tr' argument. This can be used to selectively
2725 will be available in the 'tr' argument. This can be used to selectively
2726 update caches relevant to the changes in that transaction.
2726 update caches relevant to the changes in that transaction.
2727
2727
2728 If 'full' is set, make sure all caches the function knows about have
2728 If 'full' is set, make sure all caches the function knows about have
2729 up-to-date data. Even the ones usually loaded more lazily.
2729 up-to-date data. Even the ones usually loaded more lazily.
2730
2731 The `full` argument can take a special "post-clone" value. In this case
2732 the cache warming is made after a clone and of the slower cache might
2733 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2734 as we plan for a cleaner way to deal with this for 5.9.
2730 """
2735 """
2731 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2736 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2732 # During strip, many caches are invalid but
2737 # During strip, many caches are invalid but
2733 # later call to `destroyed` will refresh them.
2738 # later call to `destroyed` will refresh them.
2734 return
2739 return
2735
2740
2736 if tr is None or tr.changes[b'origrepolen'] < len(self):
2741 if tr is None or tr.changes[b'origrepolen'] < len(self):
2737 # accessing the 'served' branchmap should refresh all the others,
2742 # accessing the 'served' branchmap should refresh all the others,
2738 self.ui.debug(b'updating the branch cache\n')
2743 self.ui.debug(b'updating the branch cache\n')
2739 self.filtered(b'served').branchmap()
2744 self.filtered(b'served').branchmap()
2740 self.filtered(b'served.hidden').branchmap()
2745 self.filtered(b'served.hidden').branchmap()
2741
2746
2742 if full:
2747 if full:
2743 unfi = self.unfiltered()
2748 unfi = self.unfiltered()
2744
2749
2745 self.changelog.update_caches(transaction=tr)
2750 self.changelog.update_caches(transaction=tr)
2746 self.manifestlog.update_caches(transaction=tr)
2751 self.manifestlog.update_caches(transaction=tr)
2747
2752
2748 rbc = unfi.revbranchcache()
2753 rbc = unfi.revbranchcache()
2749 for r in unfi.changelog:
2754 for r in unfi.changelog:
2750 rbc.branchinfo(r)
2755 rbc.branchinfo(r)
2751 rbc.write()
2756 rbc.write()
2752
2757
2753 # ensure the working copy parents are in the manifestfulltextcache
2758 # ensure the working copy parents are in the manifestfulltextcache
2754 for ctx in self[b'.'].parents():
2759 for ctx in self[b'.'].parents():
2755 ctx.manifest() # accessing the manifest is enough
2760 ctx.manifest() # accessing the manifest is enough
2756
2761
2757 # accessing fnode cache warms the cache
2762 if not full == b"post-clone":
2758 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2763 # accessing fnode cache warms the cache
2764 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2759 # accessing tags warm the cache
2765 # accessing tags warm the cache
2760 self.tags()
2766 self.tags()
2761 self.filtered(b'served').tags()
2767 self.filtered(b'served').tags()
2762
2768
2763 # The `full` arg is documented as updating even the lazily-loaded
2769 # The `full` arg is documented as updating even the lazily-loaded
2764 # caches immediately, so we're forcing a write to cause these caches
2770 # caches immediately, so we're forcing a write to cause these caches
2765 # to be warmed up even if they haven't explicitly been requested
2771 # to be warmed up even if they haven't explicitly been requested
2766 # yet (if they've never been used by hg, they won't ever have been
2772 # yet (if they've never been used by hg, they won't ever have been
2767 # written, even if they're a subset of another kind of cache that
2773 # written, even if they're a subset of another kind of cache that
2768 # *has* been used).
2774 # *has* been used).
2769 for filt in repoview.filtertable.keys():
2775 for filt in repoview.filtertable.keys():
2770 filtered = self.filtered(filt)
2776 filtered = self.filtered(filt)
2771 filtered.branchmap().write(filtered)
2777 filtered.branchmap().write(filtered)
2772
2778
2773 def invalidatecaches(self):
2779 def invalidatecaches(self):
2774
2780
2775 if '_tagscache' in vars(self):
2781 if '_tagscache' in vars(self):
2776 # can't use delattr on proxy
2782 # can't use delattr on proxy
2777 del self.__dict__['_tagscache']
2783 del self.__dict__['_tagscache']
2778
2784
2779 self._branchcaches.clear()
2785 self._branchcaches.clear()
2780 self.invalidatevolatilesets()
2786 self.invalidatevolatilesets()
2781 self._sparsesignaturecache.clear()
2787 self._sparsesignaturecache.clear()
2782
2788
2783 def invalidatevolatilesets(self):
2789 def invalidatevolatilesets(self):
2784 self.filteredrevcache.clear()
2790 self.filteredrevcache.clear()
2785 obsolete.clearobscaches(self)
2791 obsolete.clearobscaches(self)
2786 self._quick_access_changeid_invalidate()
2792 self._quick_access_changeid_invalidate()
2787
2793
2788 def invalidatedirstate(self):
2794 def invalidatedirstate(self):
2789 """Invalidates the dirstate, causing the next call to dirstate
2795 """Invalidates the dirstate, causing the next call to dirstate
2790 to check if it was modified since the last time it was read,
2796 to check if it was modified since the last time it was read,
2791 rereading it if it has.
2797 rereading it if it has.
2792
2798
2793 This is different to dirstate.invalidate() that it doesn't always
2799 This is different to dirstate.invalidate() that it doesn't always
2794 rereads the dirstate. Use dirstate.invalidate() if you want to
2800 rereads the dirstate. Use dirstate.invalidate() if you want to
2795 explicitly read the dirstate again (i.e. restoring it to a previous
2801 explicitly read the dirstate again (i.e. restoring it to a previous
2796 known good state)."""
2802 known good state)."""
2797 if hasunfilteredcache(self, 'dirstate'):
2803 if hasunfilteredcache(self, 'dirstate'):
2798 for k in self.dirstate._filecache:
2804 for k in self.dirstate._filecache:
2799 try:
2805 try:
2800 delattr(self.dirstate, k)
2806 delattr(self.dirstate, k)
2801 except AttributeError:
2807 except AttributeError:
2802 pass
2808 pass
2803 delattr(self.unfiltered(), 'dirstate')
2809 delattr(self.unfiltered(), 'dirstate')
2804
2810
2805 def invalidate(self, clearfilecache=False):
2811 def invalidate(self, clearfilecache=False):
2806 """Invalidates both store and non-store parts other than dirstate
2812 """Invalidates both store and non-store parts other than dirstate
2807
2813
2808 If a transaction is running, invalidation of store is omitted,
2814 If a transaction is running, invalidation of store is omitted,
2809 because discarding in-memory changes might cause inconsistency
2815 because discarding in-memory changes might cause inconsistency
2810 (e.g. incomplete fncache causes unintentional failure, but
2816 (e.g. incomplete fncache causes unintentional failure, but
2811 redundant one doesn't).
2817 redundant one doesn't).
2812 """
2818 """
2813 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2819 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2814 for k in list(self._filecache.keys()):
2820 for k in list(self._filecache.keys()):
2815 # dirstate is invalidated separately in invalidatedirstate()
2821 # dirstate is invalidated separately in invalidatedirstate()
2816 if k == b'dirstate':
2822 if k == b'dirstate':
2817 continue
2823 continue
2818 if (
2824 if (
2819 k == b'changelog'
2825 k == b'changelog'
2820 and self.currenttransaction()
2826 and self.currenttransaction()
2821 and self.changelog._delayed
2827 and self.changelog._delayed
2822 ):
2828 ):
2823 # The changelog object may store unwritten revisions. We don't
2829 # The changelog object may store unwritten revisions. We don't
2824 # want to lose them.
2830 # want to lose them.
2825 # TODO: Solve the problem instead of working around it.
2831 # TODO: Solve the problem instead of working around it.
2826 continue
2832 continue
2827
2833
2828 if clearfilecache:
2834 if clearfilecache:
2829 del self._filecache[k]
2835 del self._filecache[k]
2830 try:
2836 try:
2831 delattr(unfiltered, k)
2837 delattr(unfiltered, k)
2832 except AttributeError:
2838 except AttributeError:
2833 pass
2839 pass
2834 self.invalidatecaches()
2840 self.invalidatecaches()
2835 if not self.currenttransaction():
2841 if not self.currenttransaction():
2836 # TODO: Changing contents of store outside transaction
2842 # TODO: Changing contents of store outside transaction
2837 # causes inconsistency. We should make in-memory store
2843 # causes inconsistency. We should make in-memory store
2838 # changes detectable, and abort if changed.
2844 # changes detectable, and abort if changed.
2839 self.store.invalidatecaches()
2845 self.store.invalidatecaches()
2840
2846
2841 def invalidateall(self):
2847 def invalidateall(self):
2842 """Fully invalidates both store and non-store parts, causing the
2848 """Fully invalidates both store and non-store parts, causing the
2843 subsequent operation to reread any outside changes."""
2849 subsequent operation to reread any outside changes."""
2844 # extension should hook this to invalidate its caches
2850 # extension should hook this to invalidate its caches
2845 self.invalidate()
2851 self.invalidate()
2846 self.invalidatedirstate()
2852 self.invalidatedirstate()
2847
2853
2848 @unfilteredmethod
2854 @unfilteredmethod
2849 def _refreshfilecachestats(self, tr):
2855 def _refreshfilecachestats(self, tr):
2850 """Reload stats of cached files so that they are flagged as valid"""
2856 """Reload stats of cached files so that they are flagged as valid"""
2851 for k, ce in self._filecache.items():
2857 for k, ce in self._filecache.items():
2852 k = pycompat.sysstr(k)
2858 k = pycompat.sysstr(k)
2853 if k == 'dirstate' or k not in self.__dict__:
2859 if k == 'dirstate' or k not in self.__dict__:
2854 continue
2860 continue
2855 ce.refresh()
2861 ce.refresh()
2856
2862
2857 def _lock(
2863 def _lock(
2858 self,
2864 self,
2859 vfs,
2865 vfs,
2860 lockname,
2866 lockname,
2861 wait,
2867 wait,
2862 releasefn,
2868 releasefn,
2863 acquirefn,
2869 acquirefn,
2864 desc,
2870 desc,
2865 ):
2871 ):
2866 timeout = 0
2872 timeout = 0
2867 warntimeout = 0
2873 warntimeout = 0
2868 if wait:
2874 if wait:
2869 timeout = self.ui.configint(b"ui", b"timeout")
2875 timeout = self.ui.configint(b"ui", b"timeout")
2870 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2876 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2871 # internal config: ui.signal-safe-lock
2877 # internal config: ui.signal-safe-lock
2872 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2878 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2873
2879
2874 l = lockmod.trylock(
2880 l = lockmod.trylock(
2875 self.ui,
2881 self.ui,
2876 vfs,
2882 vfs,
2877 lockname,
2883 lockname,
2878 timeout,
2884 timeout,
2879 warntimeout,
2885 warntimeout,
2880 releasefn=releasefn,
2886 releasefn=releasefn,
2881 acquirefn=acquirefn,
2887 acquirefn=acquirefn,
2882 desc=desc,
2888 desc=desc,
2883 signalsafe=signalsafe,
2889 signalsafe=signalsafe,
2884 )
2890 )
2885 return l
2891 return l
2886
2892
2887 def _afterlock(self, callback):
2893 def _afterlock(self, callback):
2888 """add a callback to be run when the repository is fully unlocked
2894 """add a callback to be run when the repository is fully unlocked
2889
2895
2890 The callback will be executed when the outermost lock is released
2896 The callback will be executed when the outermost lock is released
2891 (with wlock being higher level than 'lock')."""
2897 (with wlock being higher level than 'lock')."""
2892 for ref in (self._wlockref, self._lockref):
2898 for ref in (self._wlockref, self._lockref):
2893 l = ref and ref()
2899 l = ref and ref()
2894 if l and l.held:
2900 if l and l.held:
2895 l.postrelease.append(callback)
2901 l.postrelease.append(callback)
2896 break
2902 break
2897 else: # no lock have been found.
2903 else: # no lock have been found.
2898 callback(True)
2904 callback(True)
2899
2905
2900 def lock(self, wait=True):
2906 def lock(self, wait=True):
2901 """Lock the repository store (.hg/store) and return a weak reference
2907 """Lock the repository store (.hg/store) and return a weak reference
2902 to the lock. Use this before modifying the store (e.g. committing or
2908 to the lock. Use this before modifying the store (e.g. committing or
2903 stripping). If you are opening a transaction, get a lock as well.)
2909 stripping). If you are opening a transaction, get a lock as well.)
2904
2910
2905 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2911 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2906 'wlock' first to avoid a dead-lock hazard."""
2912 'wlock' first to avoid a dead-lock hazard."""
2907 l = self._currentlock(self._lockref)
2913 l = self._currentlock(self._lockref)
2908 if l is not None:
2914 if l is not None:
2909 l.lock()
2915 l.lock()
2910 return l
2916 return l
2911
2917
2912 l = self._lock(
2918 l = self._lock(
2913 vfs=self.svfs,
2919 vfs=self.svfs,
2914 lockname=b"lock",
2920 lockname=b"lock",
2915 wait=wait,
2921 wait=wait,
2916 releasefn=None,
2922 releasefn=None,
2917 acquirefn=self.invalidate,
2923 acquirefn=self.invalidate,
2918 desc=_(b'repository %s') % self.origroot,
2924 desc=_(b'repository %s') % self.origroot,
2919 )
2925 )
2920 self._lockref = weakref.ref(l)
2926 self._lockref = weakref.ref(l)
2921 return l
2927 return l
2922
2928
2923 def wlock(self, wait=True):
2929 def wlock(self, wait=True):
2924 """Lock the non-store parts of the repository (everything under
2930 """Lock the non-store parts of the repository (everything under
2925 .hg except .hg/store) and return a weak reference to the lock.
2931 .hg except .hg/store) and return a weak reference to the lock.
2926
2932
2927 Use this before modifying files in .hg.
2933 Use this before modifying files in .hg.
2928
2934
2929 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2935 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2930 'wlock' first to avoid a dead-lock hazard."""
2936 'wlock' first to avoid a dead-lock hazard."""
2931 l = self._wlockref() if self._wlockref else None
2937 l = self._wlockref() if self._wlockref else None
2932 if l is not None and l.held:
2938 if l is not None and l.held:
2933 l.lock()
2939 l.lock()
2934 return l
2940 return l
2935
2941
2936 # We do not need to check for non-waiting lock acquisition. Such
2942 # We do not need to check for non-waiting lock acquisition. Such
2937 # acquisition would not cause dead-lock as they would just fail.
2943 # acquisition would not cause dead-lock as they would just fail.
2938 if wait and (
2944 if wait and (
2939 self.ui.configbool(b'devel', b'all-warnings')
2945 self.ui.configbool(b'devel', b'all-warnings')
2940 or self.ui.configbool(b'devel', b'check-locks')
2946 or self.ui.configbool(b'devel', b'check-locks')
2941 ):
2947 ):
2942 if self._currentlock(self._lockref) is not None:
2948 if self._currentlock(self._lockref) is not None:
2943 self.ui.develwarn(b'"wlock" acquired after "lock"')
2949 self.ui.develwarn(b'"wlock" acquired after "lock"')
2944
2950
2945 def unlock():
2951 def unlock():
2946 if self.dirstate.pendingparentchange():
2952 if self.dirstate.pendingparentchange():
2947 self.dirstate.invalidate()
2953 self.dirstate.invalidate()
2948 else:
2954 else:
2949 self.dirstate.write(None)
2955 self.dirstate.write(None)
2950
2956
2951 self._filecache[b'dirstate'].refresh()
2957 self._filecache[b'dirstate'].refresh()
2952
2958
2953 l = self._lock(
2959 l = self._lock(
2954 self.vfs,
2960 self.vfs,
2955 b"wlock",
2961 b"wlock",
2956 wait,
2962 wait,
2957 unlock,
2963 unlock,
2958 self.invalidatedirstate,
2964 self.invalidatedirstate,
2959 _(b'working directory of %s') % self.origroot,
2965 _(b'working directory of %s') % self.origroot,
2960 )
2966 )
2961 self._wlockref = weakref.ref(l)
2967 self._wlockref = weakref.ref(l)
2962 return l
2968 return l
2963
2969
2964 def _currentlock(self, lockref):
2970 def _currentlock(self, lockref):
2965 """Returns the lock if it's held, or None if it's not."""
2971 """Returns the lock if it's held, or None if it's not."""
2966 if lockref is None:
2972 if lockref is None:
2967 return None
2973 return None
2968 l = lockref()
2974 l = lockref()
2969 if l is None or not l.held:
2975 if l is None or not l.held:
2970 return None
2976 return None
2971 return l
2977 return l
2972
2978
    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        # Delegates to the generic weakref-based lookup in _currentlock().
        return self._currentlock(self._wlockref)
2976
2982
    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable

        ``fail`` is a callback invoked with ``(filename, message)`` for
        each explicitly-named file that cannot be committed.
        """
        # Only explicit patterns (exact names or path prefixes) are checked;
        # wildcard matches are allowed to match nothing.
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                # '.', already-matched files and subrepos are fine as-is.
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    # A directory is acceptable only if at least one matched
                    # file lives under it.
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))
2998
3004
    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there was nothing
        to commit.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            # Reject patterns naming uncommittable files unless --force.
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            # Track whether the message changed so we can point the user at
            # the saved copy if the commit fails below.
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            # Hooks receive an empty string (not the null hex) when there is
            # no second parent.
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        # The 'commit' hook is deferred until the locks are released.
        self._afterlock(commithook)
        return ret
3131
3137
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Commit ``ctx`` to the repository.

        Thin wrapper delegating to ``commit.commitctx``.
        """
        return commit.commitctx(self, ctx, error=error, origctx=origctx)
3135
3141
    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        # Only flush when the cached property was actually populated.
        if '_phasecache' in vars(self):
            self._phasecache.write()
3153
3159
    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
3185
3191
3186 def status(
3192 def status(
3187 self,
3193 self,
3188 node1=b'.',
3194 node1=b'.',
3189 node2=None,
3195 node2=None,
3190 match=None,
3196 match=None,
3191 ignored=False,
3197 ignored=False,
3192 clean=False,
3198 clean=False,
3193 unknown=False,
3199 unknown=False,
3194 listsubrepos=False,
3200 listsubrepos=False,
3195 ):
3201 ):
3196 '''a convenience method that calls node1.status(node2)'''
3202 '''a convenience method that calls node1.status(node2)'''
3197 return self[node1].status(
3203 return self[node1].status(
3198 node2, match, ignored, clean, unknown, listsubrepos
3204 node2, match, ignored, clean, unknown, listsubrepos
3199 )
3205 )
3200
3206
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure it adds to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        # Emptied again by clearpostdsstatus() after each status run.
        self._postdsstatus.append(ps)
3222
3228
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        # Returns the live list; callers may mutate it (see clearpostdsstatus).
        return self._postdsstatus
3226
3232
    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        # Clear in place so existing references keep seeing the same list.
        del self._postdsstatus[:]
3230
3236
3231 def heads(self, start=None):
3237 def heads(self, start=None):
3232 if start is None:
3238 if start is None:
3233 cl = self.changelog
3239 cl = self.changelog
3234 headrevs = reversed(cl.headrevs())
3240 headrevs = reversed(cl.headrevs())
3235 return [cl.node(rev) for rev in headrevs]
3241 return [cl.node(rev) for rev in headrevs]
3236
3242
3237 heads = self.changelog.heads(start)
3243 heads = self.changelog.heads(start)
3238 # sort the output in rev descending order
3244 # sort the output in rev descending order
3239 return sorted(heads, key=self.changelog.rev, reverse=True)
3245 return sorted(heads, key=self.changelog.rev, reverse=True)
3240
3246
    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            # (nodesbetween()[2] holds the reachable head nodes)
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
3261
3267
    def branches(self, nodes):
        """Walk first parents from each node until a merge or the root.

        For every starting node ``t``, follow first parents until reaching
        a node ``n`` that is a merge (second parent is set) or whose first
        parent is null, and record ``(t, n, p0, p1)`` where ``p0``/``p1``
        are ``n``'s parents.  Defaults to the changelog tip when ``nodes``
        is empty.
        """
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n  # remember where this walk started
            while True:
                p = self.changelog.parents(n)
                # stop at a merge node or at the root of the chain
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
3275
3281
    def between(self, pairs):
        """Sample the first-parent chain between each (top, bottom) pair.

        For each pair, walk first parents from ``top`` toward ``bottom``
        (or the null node), collecting the nodes found at exponentially
        growing step counts (1, 2, 4, 8, ...).  Returns one list of
        sampled nodes per input pair.
        """
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1  # next step count at which to sample; doubles each time

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
3294
3300
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        # Intentionally a no-op in the base implementation.
3300
3306
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        # Fresh, empty hook container; cached by the property decorator.
        return util.hooks()
3307
3313
    def pushkey(self, namespace, key, old, new):
        """Update ``key`` in pushkey ``namespace`` from ``old`` to ``new``.

        Fires the ``prepushkey`` hook first; if that hook aborts, the
        update is cancelled and False is returned.  The ``pushkey`` hook
        is scheduled to run after the current lock is released.
        """
        try:
            # Forward any open transaction's hook arguments so hooks can
            # correlate this pushkey with the transaction.
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        # Defer the notification hook until locks are dropped.
        self._afterlock(runhook)
        return ret
3340
3346
3341 def listkeys(self, namespace):
3347 def listkeys(self, namespace):
3342 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3348 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3343 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3349 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3344 values = pushkey.list(self, namespace)
3350 values = pushkey.list(self, namespace)
3345 self.hook(b'listkeys', namespace=namespace, values=values)
3351 self.hook(b'listkeys', namespace=namespace, values=values)
3346 return values
3352 return values
3347
3353
3348 def debugwireargs(self, one, two, three=None, four=None, five=None):
3354 def debugwireargs(self, one, two, three=None, four=None, five=None):
3349 '''used to test argument passing over the wire'''
3355 '''used to test argument passing over the wire'''
3350 return b"%s %s %s %s %s" % (
3356 return b"%s %s %s %s %s" % (
3351 one,
3357 one,
3352 two,
3358 two,
3353 pycompat.bytestr(three),
3359 pycompat.bytestr(three),
3354 pycompat.bytestr(four),
3360 pycompat.bytestr(four),
3355 pycompat.bytestr(five),
3361 pycompat.bytestr(five),
3356 )
3362 )
3357
3363
3358 def savecommitmessage(self, text):
3364 def savecommitmessage(self, text):
3359 fp = self.vfs(b'last-message.txt', b'wb')
3365 fp = self.vfs(b'last-message.txt', b'wb')
3360 try:
3366 try:
3361 fp.write(text)
3367 fp.write(text)
3362 finally:
3368 finally:
3363 fp.close()
3369 fp.close()
3364 return self.pathto(fp.name[len(self.root) + 1 :])
3370 return self.pathto(fp.name[len(self.root) + 1 :])
3365
3371
    def register_wanted_sidedata(self, category):
        """Record that sidedata ``category`` is wanted by this repository."""
        self._wanted_sidedata.add(pycompat.bytestr(category))
3368
3374
3369 def register_sidedata_computer(self, kind, category, keys, computer):
3375 def register_sidedata_computer(self, kind, category, keys, computer):
3370 if kind not in (b"changelog", b"manifest", b"filelog"):
3376 if kind not in (b"changelog", b"manifest", b"filelog"):
3371 msg = _(b"unexpected revlog kind '%s'.")
3377 msg = _(b"unexpected revlog kind '%s'.")
3372 raise error.ProgrammingError(msg % kind)
3378 raise error.ProgrammingError(msg % kind)
3373 category = pycompat.bytestr(category)
3379 category = pycompat.bytestr(category)
3374 if category in self._sidedata_computers.get(kind, []):
3380 if category in self._sidedata_computers.get(kind, []):
3375 msg = _(
3381 msg = _(
3376 b"cannot register a sidedata computer twice for category '%s'."
3382 b"cannot register a sidedata computer twice for category '%s'."
3377 )
3383 )
3378 raise error.ProgrammingError(msg % category)
3384 raise error.ProgrammingError(msg % category)
3379 self._sidedata_computers.setdefault(kind, {})
3385 self._sidedata_computers.setdefault(kind, {})
3380 self._sidedata_computers[kind][category] = (keys, computer)
3386 self._sidedata_computers[kind][category] = (keys, computer)
3381
3387
3382
3388
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each ``(vfs, src, dest)`` in ``files``.

    Missing sources are ignored (the journal file may not exist yet).
    """
    pending = [tuple(entry) for entry in files]

    def rename_all():
        for vfs, src, dest in pending:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return rename_all
3399
3405
3400
3406
def undoname(fn):
    """Map a journal file path to its corresponding undo file path.

    The basename must start with ``journal``; only that first occurrence
    is rewritten to ``undo``.
    """
    directory, basename = os.path.split(fn)
    assert basename.startswith(b'journal')
    undobase = basename.replace(b'journal', b'undo', 1)
    return os.path.join(directory, undobase)
3405
3411
3406
3412
def instance(ui, path, create, intents=None, createopts=None):
    """Return a local repository object for ``path``.

    When ``create`` is true, the repository is first created on disk.
    """
    fspath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, fspath, createopts=createopts)
    return makelocalrepository(ui, fspath, intents=intents)
3413
3419
3414
3420
def islocal(path):
    """A localrepo path is always local."""
    return True
3417
3423
3418
3424
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.  The input mapping is not
    mutated; a fresh dict is returned.
    """
    opts = {} if createopts is None else dict(createopts)

    if b'backend' not in opts:
        # experimental config: storage.new-repo-backend
        opts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return opts
3432
3438
3433
3439
3434 def newreporequirements(ui, createopts):
3440 def newreporequirements(ui, createopts):
3435 """Determine the set of requirements for a new local repository.
3441 """Determine the set of requirements for a new local repository.
3436
3442
3437 Extensions can wrap this function to specify custom requirements for
3443 Extensions can wrap this function to specify custom requirements for
3438 new repositories.
3444 new repositories.
3439 """
3445 """
3440 # If the repo is being created from a shared repository, we copy
3446 # If the repo is being created from a shared repository, we copy
3441 # its requirements.
3447 # its requirements.
3442 if b'sharedrepo' in createopts:
3448 if b'sharedrepo' in createopts:
3443 requirements = set(createopts[b'sharedrepo'].requirements)
3449 requirements = set(createopts[b'sharedrepo'].requirements)
3444 if createopts.get(b'sharedrelative'):
3450 if createopts.get(b'sharedrelative'):
3445 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3451 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3446 else:
3452 else:
3447 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3453 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3448
3454
3449 return requirements
3455 return requirements
3450
3456
3451 if b'backend' not in createopts:
3457 if b'backend' not in createopts:
3452 raise error.ProgrammingError(
3458 raise error.ProgrammingError(
3453 b'backend key not present in createopts; '
3459 b'backend key not present in createopts; '
3454 b'was defaultcreateopts() called?'
3460 b'was defaultcreateopts() called?'
3455 )
3461 )
3456
3462
3457 if createopts[b'backend'] != b'revlogv1':
3463 if createopts[b'backend'] != b'revlogv1':
3458 raise error.Abort(
3464 raise error.Abort(
3459 _(
3465 _(
3460 b'unable to determine repository requirements for '
3466 b'unable to determine repository requirements for '
3461 b'storage backend: %s'
3467 b'storage backend: %s'
3462 )
3468 )
3463 % createopts[b'backend']
3469 % createopts[b'backend']
3464 )
3470 )
3465
3471
3466 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3472 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3467 if ui.configbool(b'format', b'usestore'):
3473 if ui.configbool(b'format', b'usestore'):
3468 requirements.add(requirementsmod.STORE_REQUIREMENT)
3474 requirements.add(requirementsmod.STORE_REQUIREMENT)
3469 if ui.configbool(b'format', b'usefncache'):
3475 if ui.configbool(b'format', b'usefncache'):
3470 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3476 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3471 if ui.configbool(b'format', b'dotencode'):
3477 if ui.configbool(b'format', b'dotencode'):
3472 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3478 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3473
3479
3474 compengines = ui.configlist(b'format', b'revlog-compression')
3480 compengines = ui.configlist(b'format', b'revlog-compression')
3475 for compengine in compengines:
3481 for compengine in compengines:
3476 if compengine in util.compengines:
3482 if compengine in util.compengines:
3477 engine = util.compengines[compengine]
3483 engine = util.compengines[compengine]
3478 if engine.available() and engine.revlogheader():
3484 if engine.available() and engine.revlogheader():
3479 break
3485 break
3480 else:
3486 else:
3481 raise error.Abort(
3487 raise error.Abort(
3482 _(
3488 _(
3483 b'compression engines %s defined by '
3489 b'compression engines %s defined by '
3484 b'format.revlog-compression not available'
3490 b'format.revlog-compression not available'
3485 )
3491 )
3486 % b', '.join(b'"%s"' % e for e in compengines),
3492 % b', '.join(b'"%s"' % e for e in compengines),
3487 hint=_(
3493 hint=_(
3488 b'run "hg debuginstall" to list available '
3494 b'run "hg debuginstall" to list available '
3489 b'compression engines'
3495 b'compression engines'
3490 ),
3496 ),
3491 )
3497 )
3492
3498
3493 # zlib is the historical default and doesn't need an explicit requirement.
3499 # zlib is the historical default and doesn't need an explicit requirement.
3494 if compengine == b'zstd':
3500 if compengine == b'zstd':
3495 requirements.add(b'revlog-compression-zstd')
3501 requirements.add(b'revlog-compression-zstd')
3496 elif compengine != b'zlib':
3502 elif compengine != b'zlib':
3497 requirements.add(b'exp-compression-%s' % compengine)
3503 requirements.add(b'exp-compression-%s' % compengine)
3498
3504
3499 if scmutil.gdinitconfig(ui):
3505 if scmutil.gdinitconfig(ui):
3500 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3506 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3501 if ui.configbool(b'format', b'sparse-revlog'):
3507 if ui.configbool(b'format', b'sparse-revlog'):
3502 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3508 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3503
3509
3504 # experimental config: format.exp-use-side-data
3510 # experimental config: format.exp-use-side-data
3505 if ui.configbool(b'format', b'exp-use-side-data'):
3511 if ui.configbool(b'format', b'exp-use-side-data'):
3506 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3512 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3507 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3513 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3508 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3514 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3509 # experimental config: format.exp-use-copies-side-data-changeset
3515 # experimental config: format.exp-use-copies-side-data-changeset
3510 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3516 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3511 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3517 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3512 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3518 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3513 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3519 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3514 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3520 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3515 if ui.configbool(b'experimental', b'treemanifest'):
3521 if ui.configbool(b'experimental', b'treemanifest'):
3516 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3522 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3517
3523
3518 revlogv2 = ui.config(b'experimental', b'revlogv2')
3524 revlogv2 = ui.config(b'experimental', b'revlogv2')
3519 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3525 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3520 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3526 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3521 # generaldelta is implied by revlogv2.
3527 # generaldelta is implied by revlogv2.
3522 requirements.discard(requirementsmod.GENERALDELTA_REQUIREMENT)
3528 requirements.discard(requirementsmod.GENERALDELTA_REQUIREMENT)
3523 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3529 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3524 # experimental config: format.internal-phase
3530 # experimental config: format.internal-phase
3525 if ui.configbool(b'format', b'internal-phase'):
3531 if ui.configbool(b'format', b'internal-phase'):
3526 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3532 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3527
3533
3528 if createopts.get(b'narrowfiles'):
3534 if createopts.get(b'narrowfiles'):
3529 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3535 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3530
3536
3531 if createopts.get(b'lfs'):
3537 if createopts.get(b'lfs'):
3532 requirements.add(b'lfs')
3538 requirements.add(b'lfs')
3533
3539
3534 if ui.configbool(b'format', b'bookmarks-in-store'):
3540 if ui.configbool(b'format', b'bookmarks-in-store'):
3535 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3541 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3536
3542
3537 if ui.configbool(b'format', b'use-persistent-nodemap'):
3543 if ui.configbool(b'format', b'use-persistent-nodemap'):
3538 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3544 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3539
3545
3540 # if share-safe is enabled, let's create the new repository with the new
3546 # if share-safe is enabled, let's create the new repository with the new
3541 # requirement
3547 # requirement
3542 if ui.configbool(b'format', b'use-share-safe'):
3548 if ui.configbool(b'format', b'use-share-safe'):
3543 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3549 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3544
3550
3545 return requirements
3551 return requirements
3546
3552
3547
3553
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which needs to be dropped because dependent
    requirements are not enabled. Also warns users about it.

    ``ui`` is only used to emit warnings; ``requirements`` is the proposed
    set of requirement strings for a repository about to be created.
    """

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        # bookmarks-in-store needs a store directory to put bookmarks in,
        # so silently drop it (with a warning) when the store is disabled.
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        # Sharing works by pointing at the source repo's store directory,
        # so a store-less source repository cannot be shared at all.
        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        # share-safe keeps requirements in the store, which doesn't exist
        # here; warn and drop it as well.
        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped
3589
3595
3590
3596
def filterknowncreateopts(ui, createopts):
    """Return the subset of ``createopts`` that this code cannot handle.

    ``createopts`` maps repo-creation option names to values.  Every key
    that core code understands is filtered out; whatever remains is
    unrecognized.  Repository creation is refused by the caller when the
    result is non-empty, because that means the request asked for behavior
    no loaded code implements.

    Extensions wrap this function to additionally remove the creation
    options they themselves know how to handle.
    """
    recognized = frozenset(
        [
            b'backend',
            b'lfs',
            b'narrowfiles',
            b'sharedrepo',
            b'sharedrelative',
            b'shareditems',
            b'shallowfilestore',
        ]
    )

    unknown = {}
    for opt, value in createopts.items():
        if opt not in recognized:
            unknown[opt] = value
    return unknown
3616
3622
3617
3623
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).

    Raises ``error.ProgrammingError`` for misuse by extensions,
    ``error.Abort`` for user-facing failures, and ``error.RepoError``
    when ``path`` already contains a repository.
    """
    # Normalize options first (presumably fills in defaults -- see
    # defaultcreateopts earlier in this file).
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    # filterknowncreateopts() is wrapped by extensions; guard against a
    # wrapper returning the wrong type.
    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    # Compute the requirement set, then drop any entries that are
    # incompatible with the rest of the configuration (with warnings).
    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    # 'cache' lives with the (shared) store, so only create it for
    # non-shared repos; 'wcache' caches working-copy state and every
    # repo gets its own.
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write stored requirements
    # For new shared repository, we don't need to write the store
    # requirements as they are already present in store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
3733
3739
3734
3740
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Let the instance release its resources before we neuter it.
    repo.close()

    # Strategy: swap the object's class for one whose attribute lookups
    # always fail.  close() has to remain callable, though, because some
    # repo constructors call close() on references they hold.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item != 'close':
                raise error.ProgrammingError(
                    b'repo instances should not be used after unshare'
                )

            return object.__getattribute__(self, item)

        def close(self):
            pass

    # A repoview may intercept __setattr__, so perform the class swap at
    # the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,671 +1,670 b''
1 #require serve no-reposimplestore no-chg
1 #require serve no-reposimplestore no-chg
2
2
3 #testcases stream-legacy stream-bundle2
3 #testcases stream-legacy stream-bundle2
4
4
5 #if stream-legacy
5 #if stream-legacy
6 $ cat << EOF >> $HGRCPATH
6 $ cat << EOF >> $HGRCPATH
7 > [server]
7 > [server]
8 > bundle2.stream = no
8 > bundle2.stream = no
9 > EOF
9 > EOF
10 #endif
10 #endif
11
11
12 Initialize repository
12 Initialize repository
13 the status call is to check for issue5130
13 the status call is to check for issue5130
14
14
15 $ hg init server
15 $ hg init server
16 $ cd server
16 $ cd server
17 $ touch foo
17 $ touch foo
18 $ hg -q commit -A -m initial
18 $ hg -q commit -A -m initial
19 >>> for i in range(1024):
19 >>> for i in range(1024):
20 ... with open(str(i), 'wb') as fh:
20 ... with open(str(i), 'wb') as fh:
21 ... fh.write(b"%d" % i) and None
21 ... fh.write(b"%d" % i) and None
22 $ hg -q commit -A -m 'add a lot of files'
22 $ hg -q commit -A -m 'add a lot of files'
23 $ hg st
23 $ hg st
24 $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
24 $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
25 $ cat hg.pid > $DAEMON_PIDS
25 $ cat hg.pid > $DAEMON_PIDS
26 $ cd ..
26 $ cd ..
27
27
28 Cannot stream clone when server.uncompressed is set
28 Cannot stream clone when server.uncompressed is set
29
29
30 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
30 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
31 200 Script output follows
31 200 Script output follows
32
32
33 1
33 1
34
34
35 #if stream-legacy
35 #if stream-legacy
36 $ hg debugcapabilities http://localhost:$HGPORT
36 $ hg debugcapabilities http://localhost:$HGPORT
37 Main capabilities:
37 Main capabilities:
38 batch
38 batch
39 branchmap
39 branchmap
40 $USUAL_BUNDLE2_CAPS_SERVER$
40 $USUAL_BUNDLE2_CAPS_SERVER$
41 changegroupsubset
41 changegroupsubset
42 compression=$BUNDLE2_COMPRESSIONS$
42 compression=$BUNDLE2_COMPRESSIONS$
43 getbundle
43 getbundle
44 httpheader=1024
44 httpheader=1024
45 httpmediatype=0.1rx,0.1tx,0.2tx
45 httpmediatype=0.1rx,0.1tx,0.2tx
46 known
46 known
47 lookup
47 lookup
48 pushkey
48 pushkey
49 unbundle=HG10GZ,HG10BZ,HG10UN
49 unbundle=HG10GZ,HG10BZ,HG10UN
50 unbundlehash
50 unbundlehash
51 Bundle2 capabilities:
51 Bundle2 capabilities:
52 HG20
52 HG20
53 bookmarks
53 bookmarks
54 changegroup
54 changegroup
55 01
55 01
56 02
56 02
57 checkheads
57 checkheads
58 related
58 related
59 digests
59 digests
60 md5
60 md5
61 sha1
61 sha1
62 sha512
62 sha512
63 error
63 error
64 abort
64 abort
65 unsupportedcontent
65 unsupportedcontent
66 pushraced
66 pushraced
67 pushkey
67 pushkey
68 hgtagsfnodes
68 hgtagsfnodes
69 listkeys
69 listkeys
70 phases
70 phases
71 heads
71 heads
72 pushkey
72 pushkey
73 remote-changegroup
73 remote-changegroup
74 http
74 http
75 https
75 https
76
76
77 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
77 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
78 warning: stream clone requested but server has them disabled
78 warning: stream clone requested but server has them disabled
79 requesting all changes
79 requesting all changes
80 adding changesets
80 adding changesets
81 adding manifests
81 adding manifests
82 adding file changes
82 adding file changes
83 added 2 changesets with 1025 changes to 1025 files
83 added 2 changesets with 1025 changes to 1025 files
84 new changesets 96ee1d7354c4:c17445101a72
84 new changesets 96ee1d7354c4:c17445101a72
85
85
86 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
86 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
87 200 Script output follows
87 200 Script output follows
88 content-type: application/mercurial-0.2
88 content-type: application/mercurial-0.2
89
89
90
90
91 $ f --size body --hexdump --bytes 100
91 $ f --size body --hexdump --bytes 100
92 body: size=232
92 body: size=232
93 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
93 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
94 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
94 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
95 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
95 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
96 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
96 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
97 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
97 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
98 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
98 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
99 0060: 69 73 20 66 |is f|
99 0060: 69 73 20 66 |is f|
100
100
101 #endif
101 #endif
102 #if stream-bundle2
102 #if stream-bundle2
103 $ hg debugcapabilities http://localhost:$HGPORT
103 $ hg debugcapabilities http://localhost:$HGPORT
104 Main capabilities:
104 Main capabilities:
105 batch
105 batch
106 branchmap
106 branchmap
107 $USUAL_BUNDLE2_CAPS_SERVER$
107 $USUAL_BUNDLE2_CAPS_SERVER$
108 changegroupsubset
108 changegroupsubset
109 compression=$BUNDLE2_COMPRESSIONS$
109 compression=$BUNDLE2_COMPRESSIONS$
110 getbundle
110 getbundle
111 httpheader=1024
111 httpheader=1024
112 httpmediatype=0.1rx,0.1tx,0.2tx
112 httpmediatype=0.1rx,0.1tx,0.2tx
113 known
113 known
114 lookup
114 lookup
115 pushkey
115 pushkey
116 unbundle=HG10GZ,HG10BZ,HG10UN
116 unbundle=HG10GZ,HG10BZ,HG10UN
117 unbundlehash
117 unbundlehash
118 Bundle2 capabilities:
118 Bundle2 capabilities:
119 HG20
119 HG20
120 bookmarks
120 bookmarks
121 changegroup
121 changegroup
122 01
122 01
123 02
123 02
124 checkheads
124 checkheads
125 related
125 related
126 digests
126 digests
127 md5
127 md5
128 sha1
128 sha1
129 sha512
129 sha512
130 error
130 error
131 abort
131 abort
132 unsupportedcontent
132 unsupportedcontent
133 pushraced
133 pushraced
134 pushkey
134 pushkey
135 hgtagsfnodes
135 hgtagsfnodes
136 listkeys
136 listkeys
137 phases
137 phases
138 heads
138 heads
139 pushkey
139 pushkey
140 remote-changegroup
140 remote-changegroup
141 http
141 http
142 https
142 https
143
143
144 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
144 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
145 warning: stream clone requested but server has them disabled
145 warning: stream clone requested but server has them disabled
146 requesting all changes
146 requesting all changes
147 adding changesets
147 adding changesets
148 adding manifests
148 adding manifests
149 adding file changes
149 adding file changes
150 added 2 changesets with 1025 changes to 1025 files
150 added 2 changesets with 1025 changes to 1025 files
151 new changesets 96ee1d7354c4:c17445101a72
151 new changesets 96ee1d7354c4:c17445101a72
152
152
153 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
153 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
154 200 Script output follows
154 200 Script output follows
155 content-type: application/mercurial-0.2
155 content-type: application/mercurial-0.2
156
156
157
157
158 $ f --size body --hexdump --bytes 100
158 $ f --size body --hexdump --bytes 100
159 body: size=232
159 body: size=232
160 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
160 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
161 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
161 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
162 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
162 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
163 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
163 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
164 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
164 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
165 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
165 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
166 0060: 69 73 20 66 |is f|
166 0060: 69 73 20 66 |is f|
167
167
168 #endif
168 #endif
169
169
170 $ killdaemons.py
170 $ killdaemons.py
171 $ cd server
171 $ cd server
172 $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
172 $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
173 $ cat hg.pid > $DAEMON_PIDS
173 $ cat hg.pid > $DAEMON_PIDS
174 $ cd ..
174 $ cd ..
175
175
176 Basic clone
176 Basic clone
177
177
178 #if stream-legacy
178 #if stream-legacy
179 $ hg clone --stream -U http://localhost:$HGPORT clone1
179 $ hg clone --stream -U http://localhost:$HGPORT clone1
180 streaming all changes
180 streaming all changes
181 1027 files to transfer, 96.3 KB of data (no-zstd !)
181 1027 files to transfer, 96.3 KB of data (no-zstd !)
182 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
182 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
183 1027 files to transfer, 93.5 KB of data (zstd !)
183 1027 files to transfer, 93.5 KB of data (zstd !)
184 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
184 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
185 searching for changes
185 searching for changes
186 no changes found
186 no changes found
187 $ cat server/errors.txt
187 $ cat server/errors.txt
188 #endif
188 #endif
189 #if stream-bundle2
189 #if stream-bundle2
190 $ hg clone --stream -U http://localhost:$HGPORT clone1
190 $ hg clone --stream -U http://localhost:$HGPORT clone1
191 streaming all changes
191 streaming all changes
192 1030 files to transfer, 96.5 KB of data (no-zstd !)
192 1030 files to transfer, 96.5 KB of data (no-zstd !)
193 transferred 96.5 KB in * seconds (*/sec) (glob) (no-zstd !)
193 transferred 96.5 KB in * seconds (*/sec) (glob) (no-zstd !)
194 1030 files to transfer, 93.6 KB of data (zstd !)
194 1030 files to transfer, 93.6 KB of data (zstd !)
195 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
195 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
196
196
197 $ ls -1 clone1/.hg/cache
197 $ ls -1 clone1/.hg/cache
198 branch2-base
198 branch2-base
199 branch2-immutable
199 branch2-immutable
200 branch2-served
200 branch2-served
201 branch2-served.hidden
201 branch2-served.hidden
202 branch2-visible
202 branch2-visible
203 branch2-visible-hidden
203 branch2-visible-hidden
204 hgtagsfnodes1
205 rbc-names-v1
204 rbc-names-v1
206 rbc-revs-v1
205 rbc-revs-v1
207 tags2
206 tags2
208 tags2-served
207 tags2-served
209 $ cat server/errors.txt
208 $ cat server/errors.txt
210 #endif
209 #endif
211
210
212 getbundle requests with stream=1 are uncompressed
211 getbundle requests with stream=1 are uncompressed
213
212
214 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
213 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
215 200 Script output follows
214 200 Script output follows
216 content-type: application/mercurial-0.2
215 content-type: application/mercurial-0.2
217
216
218
217
219 $ f --size --hex --bytes 256 body
218 $ f --size --hex --bytes 256 body
220 body: size=112262 (no-zstd !)
219 body: size=112262 (no-zstd !)
221 body: size=109410 (zstd no-rust !)
220 body: size=109410 (zstd no-rust !)
222 body: size=109431 (rust !)
221 body: size=109431 (rust !)
223 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
222 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
224 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (no-zstd !)
223 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (no-zstd !)
225 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98| (no-zstd !)
224 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98| (no-zstd !)
226 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030| (no-zstd !)
225 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030| (no-zstd !)
227 0010: 99 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (zstd no-rust !)
226 0010: 99 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (zstd no-rust !)
228 0010: ae 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (rust !)
227 0010: ae 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (rust !)
229 0020: 05 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 39 35 |....^bytecount95| (zstd no-rust !)
228 0020: 05 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 39 35 |....^bytecount95| (zstd no-rust !)
230 0020: 05 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 39 35 |....sbytecount95| (rust !)
229 0020: 05 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 39 35 |....sbytecount95| (rust !)
231 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| (zstd !)
230 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| (zstd !)
232 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
231 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
233 0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
232 0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
234 0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
233 0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
235 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| (no-zstd !)
234 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| (no-zstd !)
236 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| (no-zstd !)
235 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| (no-zstd !)
237 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| (no-zstd !)
236 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| (no-zstd !)
238 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| (no-zstd !)
237 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| (no-zstd !)
239 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| (no-zstd !)
238 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| (no-zstd !)
240 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| (no-zstd !)
239 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| (no-zstd !)
241 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u| (no-zstd !)
240 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u| (no-zstd !)
242 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....| (no-zstd !)
241 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....| (no-zstd !)
243 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................| (no-zstd !)
242 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................| (no-zstd !)
244 0070: 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 73 |Crevlog-compress| (zstd no-rust !)
243 0070: 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 73 |Crevlog-compress| (zstd no-rust !)
245 0070: 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 65 |Cpersistent-node| (rust !)
244 0070: 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 65 |Cpersistent-node| (rust !)
246 0080: 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c 6f |ion-zstd%2Crevlo| (zstd no-rust !)
245 0080: 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c 6f |ion-zstd%2Crevlo| (zstd no-rust !)
247 0080: 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f 6d |map%2Crevlog-com| (rust !)
246 0080: 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f 6d |map%2Crevlog-com| (rust !)
248 0090: 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 6c |gv1%2Csparserevl| (zstd no-rust !)
247 0090: 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 6c |gv1%2Csparserevl| (zstd no-rust !)
249 0090: 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 43 |pression-zstd%2C| (rust !)
248 0090: 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 43 |pression-zstd%2C| (rust !)
250 00a0: 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 08 |og%2Cstore....s.| (zstd no-rust !)
249 00a0: 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 08 |og%2Cstore....s.| (zstd no-rust !)
251 00a0: 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 73 |revlogv1%2Cspars| (rust !)
250 00a0: 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 73 |revlogv1%2Cspars| (rust !)
252 00b0: 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 |Bdata/0.i.......| (zstd no-rust !)
251 00b0: 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 |Bdata/0.i.......| (zstd no-rust !)
253 00b0: 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 00 |erevlog%2Cstore.| (rust !)
252 00b0: 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 00 |erevlog%2Cstore.| (rust !)
254 00c0: 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 |................| (zstd no-rust !)
253 00c0: 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 |................| (zstd no-rust !)
255 00c0: 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 |...s.Bdata/0.i..| (rust !)
254 00c0: 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 |...s.Bdata/0.i..| (rust !)
256 00d0: 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 |..........)c.I.#| (zstd no-rust !)
255 00d0: 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 |..........)c.I.#| (zstd no-rust !)
257 00d0: 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 |................| (rust !)
256 00d0: 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 |................| (rust !)
258 00e0: 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 |....Vg.g,i..9...| (zstd no-rust !)
257 00e0: 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 |....Vg.g,i..9...| (zstd no-rust !)
259 00e0: 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 |...............)| (rust !)
258 00e0: 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 |...............)| (rust !)
260 00f0: 00 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 |.........u0s.Bda| (zstd no-rust !)
259 00f0: 00 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 |.........u0s.Bda| (zstd no-rust !)
261 00f0: 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 |c.I.#....Vg.g,i.| (rust !)
260 00f0: 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 |c.I.#....Vg.g,i.| (rust !)
262
261
263 --uncompressed is an alias to --stream
262 --uncompressed is an alias to --stream
264
263
265 #if stream-legacy
264 #if stream-legacy
266 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
265 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
267 streaming all changes
266 streaming all changes
268 1027 files to transfer, 96.3 KB of data (no-zstd !)
267 1027 files to transfer, 96.3 KB of data (no-zstd !)
269 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
268 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
270 1027 files to transfer, 93.5 KB of data (zstd !)
269 1027 files to transfer, 93.5 KB of data (zstd !)
271 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
270 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
272 searching for changes
271 searching for changes
273 no changes found
272 no changes found
274 #endif
273 #endif
275 #if stream-bundle2
274 #if stream-bundle2
276 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
275 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
277 streaming all changes
276 streaming all changes
278 1030 files to transfer, 96.5 KB of data (no-zstd !)
277 1030 files to transfer, 96.5 KB of data (no-zstd !)
279 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
278 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
280 1030 files to transfer, 93.6 KB of data (zstd !)
279 1030 files to transfer, 93.6 KB of data (zstd !)
281 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
280 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
282 #endif
281 #endif
283
282
284 Clone with background file closing enabled
283 Clone with background file closing enabled
285
284
286 #if stream-legacy
285 #if stream-legacy
287 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
286 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
288 using http://localhost:$HGPORT/
287 using http://localhost:$HGPORT/
289 sending capabilities command
288 sending capabilities command
290 sending branchmap command
289 sending branchmap command
291 streaming all changes
290 streaming all changes
292 sending stream_out command
291 sending stream_out command
293 1027 files to transfer, 96.3 KB of data (no-zstd !)
292 1027 files to transfer, 96.3 KB of data (no-zstd !)
294 1027 files to transfer, 93.5 KB of data (zstd !)
293 1027 files to transfer, 93.5 KB of data (zstd !)
295 starting 4 threads for background file closing
294 starting 4 threads for background file closing
296 updating the branch cache
295 updating the branch cache
297 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
296 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
298 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
297 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
299 query 1; heads
298 query 1; heads
300 sending batch command
299 sending batch command
301 searching for changes
300 searching for changes
302 all remote heads known locally
301 all remote heads known locally
303 no changes found
302 no changes found
304 sending getbundle command
303 sending getbundle command
305 bundle2-input-bundle: with-transaction
304 bundle2-input-bundle: with-transaction
306 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
305 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
307 bundle2-input-part: "phase-heads" supported
306 bundle2-input-part: "phase-heads" supported
308 bundle2-input-part: total payload size 24
307 bundle2-input-part: total payload size 24
309 bundle2-input-bundle: 2 parts total
308 bundle2-input-bundle: 2 parts total
310 checking for updated bookmarks
309 checking for updated bookmarks
311 updating the branch cache
310 updating the branch cache
312 (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
311 (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
313 #endif
312 #endif
314 #if stream-bundle2
313 #if stream-bundle2
315 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
314 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
316 using http://localhost:$HGPORT/
315 using http://localhost:$HGPORT/
317 sending capabilities command
316 sending capabilities command
318 query 1; heads
317 query 1; heads
319 sending batch command
318 sending batch command
320 streaming all changes
319 streaming all changes
321 sending getbundle command
320 sending getbundle command
322 bundle2-input-bundle: with-transaction
321 bundle2-input-bundle: with-transaction
323 bundle2-input-part: "stream2" (params: 3 mandatory) supported
322 bundle2-input-part: "stream2" (params: 3 mandatory) supported
324 applying stream bundle
323 applying stream bundle
325 1030 files to transfer, 96.5 KB of data (no-zstd !)
324 1030 files to transfer, 96.5 KB of data (no-zstd !)
326 1030 files to transfer, 93.6 KB of data (zstd !)
325 1030 files to transfer, 93.6 KB of data (zstd !)
327 starting 4 threads for background file closing
326 starting 4 threads for background file closing
328 starting 4 threads for background file closing
327 starting 4 threads for background file closing
329 updating the branch cache
328 updating the branch cache
330 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
329 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
331 bundle2-input-part: total payload size 112094 (no-zstd !)
330 bundle2-input-part: total payload size 112094 (no-zstd !)
332 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
331 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
333 bundle2-input-part: total payload size 109216 (zstd !)
332 bundle2-input-part: total payload size 109216 (zstd !)
334 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
333 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
335 bundle2-input-bundle: 2 parts total
334 bundle2-input-bundle: 2 parts total
336 checking for updated bookmarks
335 checking for updated bookmarks
337 updating the branch cache
336 updating the branch cache
338 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
337 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
339 #endif
338 #endif
340
339
341 Cannot stream clone when there are secret changesets
340 Cannot stream clone when there are secret changesets
342
341
343 $ hg -R server phase --force --secret -r tip
342 $ hg -R server phase --force --secret -r tip
344 $ hg clone --stream -U http://localhost:$HGPORT secret-denied
343 $ hg clone --stream -U http://localhost:$HGPORT secret-denied
345 warning: stream clone requested but server has them disabled
344 warning: stream clone requested but server has them disabled
346 requesting all changes
345 requesting all changes
347 adding changesets
346 adding changesets
348 adding manifests
347 adding manifests
349 adding file changes
348 adding file changes
350 added 1 changesets with 1 changes to 1 files
349 added 1 changesets with 1 changes to 1 files
351 new changesets 96ee1d7354c4
350 new changesets 96ee1d7354c4
352
351
353 $ killdaemons.py
352 $ killdaemons.py
354
353
355 Streaming of secrets can be overridden by server config
354 Streaming of secrets can be overridden by server config
356
355
357 $ cd server
356 $ cd server
358 $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
357 $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
359 $ cat hg.pid > $DAEMON_PIDS
358 $ cat hg.pid > $DAEMON_PIDS
360 $ cd ..
359 $ cd ..
361
360
362 #if stream-legacy
361 #if stream-legacy
363 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
362 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
364 streaming all changes
363 streaming all changes
365 1027 files to transfer, 96.3 KB of data (no-zstd !)
364 1027 files to transfer, 96.3 KB of data (no-zstd !)
366 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
365 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
367 1027 files to transfer, 93.5 KB of data (zstd !)
366 1027 files to transfer, 93.5 KB of data (zstd !)
368 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
367 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
369 searching for changes
368 searching for changes
370 no changes found
369 no changes found
371 #endif
370 #endif
372 #if stream-bundle2
371 #if stream-bundle2
373 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
372 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
374 streaming all changes
373 streaming all changes
375 1030 files to transfer, 96.5 KB of data (no-zstd !)
374 1030 files to transfer, 96.5 KB of data (no-zstd !)
376 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
375 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
377 1030 files to transfer, 93.6 KB of data (zstd !)
376 1030 files to transfer, 93.6 KB of data (zstd !)
378 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
377 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
379 #endif
378 #endif
380
379
381 $ killdaemons.py
380 $ killdaemons.py
382
381
383 Verify interaction between preferuncompressed and secret presence
382 Verify interaction between preferuncompressed and secret presence
384
383
385 $ cd server
384 $ cd server
386 $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
385 $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
387 $ cat hg.pid > $DAEMON_PIDS
386 $ cat hg.pid > $DAEMON_PIDS
388 $ cd ..
387 $ cd ..
389
388
390 $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
389 $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
391 requesting all changes
390 requesting all changes
392 adding changesets
391 adding changesets
393 adding manifests
392 adding manifests
394 adding file changes
393 adding file changes
395 added 1 changesets with 1 changes to 1 files
394 added 1 changesets with 1 changes to 1 files
396 new changesets 96ee1d7354c4
395 new changesets 96ee1d7354c4
397
396
398 $ killdaemons.py
397 $ killdaemons.py
399
398
400 Clone not allowed when full bundles disabled and can't serve secrets
399 Clone not allowed when full bundles disabled and can't serve secrets
401
400
402 $ cd server
401 $ cd server
403 $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
402 $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
404 $ cat hg.pid > $DAEMON_PIDS
403 $ cat hg.pid > $DAEMON_PIDS
405 $ cd ..
404 $ cd ..
406
405
407 $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
406 $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
408 warning: stream clone requested but server has them disabled
407 warning: stream clone requested but server has them disabled
409 requesting all changes
408 requesting all changes
410 remote: abort: server has pull-based clones disabled
409 remote: abort: server has pull-based clones disabled
411 abort: pull failed on remote
410 abort: pull failed on remote
412 (remove --pull if specified or upgrade Mercurial)
411 (remove --pull if specified or upgrade Mercurial)
413 [100]
412 [100]
414
413
415 Local stream clone with secrets involved
414 Local stream clone with secrets involved
416 (This is just a test over behavior: if you have access to the repo's files,
415 (This is just a test over behavior: if you have access to the repo's files,
417 there is no security so it isn't important to prevent a clone here.)
416 there is no security so it isn't important to prevent a clone here.)
418
417
419 $ hg clone -U --stream server local-secret
418 $ hg clone -U --stream server local-secret
420 warning: stream clone requested but server has them disabled
419 warning: stream clone requested but server has them disabled
421 requesting all changes
420 requesting all changes
422 adding changesets
421 adding changesets
423 adding manifests
422 adding manifests
424 adding file changes
423 adding file changes
425 added 1 changesets with 1 changes to 1 files
424 added 1 changesets with 1 changes to 1 files
426 new changesets 96ee1d7354c4
425 new changesets 96ee1d7354c4
427
426
428 Stream clone while repo is changing:
427 Stream clone while repo is changing:
429
428
430 $ mkdir changing
429 $ mkdir changing
431 $ cd changing
430 $ cd changing
432
431
433 extension for delaying the server process so we reliably can modify the repo
432 extension for delaying the server process so we reliably can modify the repo
434 while cloning
433 while cloning
435
434
436 $ cat > stream_steps.py <<EOF
435 $ cat > stream_steps.py <<EOF
437 > import os
436 > import os
438 > import sys
437 > import sys
439 > from mercurial import (
438 > from mercurial import (
440 > encoding,
439 > encoding,
441 > extensions,
440 > extensions,
442 > streamclone,
441 > streamclone,
443 > testing,
442 > testing,
444 > )
443 > )
445 > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
444 > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
446 > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
445 > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
447 >
446 >
448 > def _test_sync_point_walk_1(orig, repo):
447 > def _test_sync_point_walk_1(orig, repo):
449 > testing.write_file(WALKED_FILE_1)
448 > testing.write_file(WALKED_FILE_1)
450 >
449 >
451 > def _test_sync_point_walk_2(orig, repo):
450 > def _test_sync_point_walk_2(orig, repo):
452 > assert repo._currentlock(repo._lockref) is None
451 > assert repo._currentlock(repo._lockref) is None
453 > testing.wait_file(WALKED_FILE_2)
452 > testing.wait_file(WALKED_FILE_2)
454 >
453 >
455 > extensions.wrapfunction(
454 > extensions.wrapfunction(
456 > streamclone,
455 > streamclone,
457 > '_test_sync_point_walk_1',
456 > '_test_sync_point_walk_1',
458 > _test_sync_point_walk_1
457 > _test_sync_point_walk_1
459 > )
458 > )
460 > extensions.wrapfunction(
459 > extensions.wrapfunction(
461 > streamclone,
460 > streamclone,
462 > '_test_sync_point_walk_2',
461 > '_test_sync_point_walk_2',
463 > _test_sync_point_walk_2
462 > _test_sync_point_walk_2
464 > )
463 > )
465 > EOF
464 > EOF
466
465
467 prepare repo with small and big file to cover both code paths in emitrevlogdata
466 prepare repo with small and big file to cover both code paths in emitrevlogdata
468
467
469 $ hg init repo
468 $ hg init repo
470 $ touch repo/f1
469 $ touch repo/f1
471 $ $TESTDIR/seq.py 50000 > repo/f2
470 $ $TESTDIR/seq.py 50000 > repo/f2
472 $ hg -R repo ci -Aqm "0"
471 $ hg -R repo ci -Aqm "0"
473 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
472 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
474 $ export HG_TEST_STREAM_WALKED_FILE_1
473 $ export HG_TEST_STREAM_WALKED_FILE_1
475 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
474 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
476 $ export HG_TEST_STREAM_WALKED_FILE_2
475 $ export HG_TEST_STREAM_WALKED_FILE_2
477 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
476 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
478 $ export HG_TEST_STREAM_WALKED_FILE_3
477 $ export HG_TEST_STREAM_WALKED_FILE_3
479 # $ cat << EOF >> $HGRCPATH
478 # $ cat << EOF >> $HGRCPATH
480 # > [hooks]
479 # > [hooks]
481 # > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
480 # > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
482 # > EOF
481 # > EOF
483 $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
482 $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
484 $ cat hg.pid >> $DAEMON_PIDS
483 $ cat hg.pid >> $DAEMON_PIDS
485
484
486 clone while modifying the repo between stating file with write lock and
485 clone while modifying the repo between stating file with write lock and
487 actually serving file content
486 actually serving file content
488
487
489 $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
488 $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
490 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
489 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
491 $ echo >> repo/f1
490 $ echo >> repo/f1
492 $ echo >> repo/f2
491 $ echo >> repo/f2
493 $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
492 $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
494 $ touch $HG_TEST_STREAM_WALKED_FILE_2
493 $ touch $HG_TEST_STREAM_WALKED_FILE_2
495 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
494 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
496 $ hg -R clone id
495 $ hg -R clone id
497 000000000000
496 000000000000
498 $ cat errors.log
497 $ cat errors.log
499 $ cd ..
498 $ cd ..
500
499
501 Stream repository with bookmarks
500 Stream repository with bookmarks
502 --------------------------------
501 --------------------------------
503
502
504 (revert introduction of secret changeset)
503 (revert introduction of secret changeset)
505
504
506 $ hg -R server phase --draft 'secret()'
505 $ hg -R server phase --draft 'secret()'
507
506
508 add a bookmark
507 add a bookmark
509
508
510 $ hg -R server bookmark -r tip some-bookmark
509 $ hg -R server bookmark -r tip some-bookmark
511
510
512 clone it
511 clone it
513
512
514 #if stream-legacy
513 #if stream-legacy
515 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
514 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
516 streaming all changes
515 streaming all changes
517 1027 files to transfer, 96.3 KB of data (no-zstd !)
516 1027 files to transfer, 96.3 KB of data (no-zstd !)
518 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
517 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
519 1027 files to transfer, 93.5 KB of data (zstd !)
518 1027 files to transfer, 93.5 KB of data (zstd !)
520 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
519 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
521 searching for changes
520 searching for changes
522 no changes found
521 no changes found
523 updating to branch default
522 updating to branch default
524 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
523 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
525 #endif
524 #endif
526 #if stream-bundle2
525 #if stream-bundle2
527 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
526 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
528 streaming all changes
527 streaming all changes
529 1033 files to transfer, 96.6 KB of data (no-zstd !)
528 1033 files to transfer, 96.6 KB of data (no-zstd !)
530 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
529 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
531 1033 files to transfer, 93.8 KB of data (zstd !)
530 1033 files to transfer, 93.8 KB of data (zstd !)
532 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
531 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
533 updating to branch default
532 updating to branch default
534 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
533 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
535 #endif
534 #endif
536 $ hg -R with-bookmarks bookmarks
535 $ hg -R with-bookmarks bookmarks
537 some-bookmark 1:c17445101a72
536 some-bookmark 1:c17445101a72
538
537
539 Stream repository with phases
538 Stream repository with phases
540 -----------------------------
539 -----------------------------
541
540
542 Clone as publishing
541 Clone as publishing
543
542
544 $ hg -R server phase -r 'all()'
543 $ hg -R server phase -r 'all()'
545 0: draft
544 0: draft
546 1: draft
545 1: draft
547
546
548 #if stream-legacy
547 #if stream-legacy
549 $ hg clone --stream http://localhost:$HGPORT phase-publish
548 $ hg clone --stream http://localhost:$HGPORT phase-publish
550 streaming all changes
549 streaming all changes
551 1027 files to transfer, 96.3 KB of data (no-zstd !)
550 1027 files to transfer, 96.3 KB of data (no-zstd !)
552 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
551 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
553 1027 files to transfer, 93.5 KB of data (zstd !)
552 1027 files to transfer, 93.5 KB of data (zstd !)
554 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
553 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
555 searching for changes
554 searching for changes
556 no changes found
555 no changes found
557 updating to branch default
556 updating to branch default
558 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
557 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
559 #endif
558 #endif
560 #if stream-bundle2
559 #if stream-bundle2
561 $ hg clone --stream http://localhost:$HGPORT phase-publish
560 $ hg clone --stream http://localhost:$HGPORT phase-publish
562 streaming all changes
561 streaming all changes
563 1033 files to transfer, 96.6 KB of data (no-zstd !)
562 1033 files to transfer, 96.6 KB of data (no-zstd !)
564 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
563 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
565 1033 files to transfer, 93.8 KB of data (zstd !)
564 1033 files to transfer, 93.8 KB of data (zstd !)
566 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
565 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
567 updating to branch default
566 updating to branch default
568 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
567 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
569 #endif
568 #endif
570 $ hg -R phase-publish phase -r 'all()'
569 $ hg -R phase-publish phase -r 'all()'
571 0: public
570 0: public
572 1: public
571 1: public
573
572
574 Clone as non publishing
573 Clone as non publishing
575
574
576 $ cat << EOF >> server/.hg/hgrc
575 $ cat << EOF >> server/.hg/hgrc
577 > [phases]
576 > [phases]
578 > publish = False
577 > publish = False
579 > EOF
578 > EOF
580 $ killdaemons.py
579 $ killdaemons.py
581 $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
580 $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
582 $ cat hg.pid > $DAEMON_PIDS
581 $ cat hg.pid > $DAEMON_PIDS
583
582
584 #if stream-legacy
583 #if stream-legacy
585
584
586 With v1 of the stream protocol, changesets are always cloned as public. This makes
585 With v1 of the stream protocol, changesets are always cloned as public. This makes
587 stream v1 unsuitable for non-publishing repositories.
586 stream v1 unsuitable for non-publishing repositories.
588
587
589 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
588 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
590 streaming all changes
589 streaming all changes
591 1027 files to transfer, 96.3 KB of data (no-zstd !)
590 1027 files to transfer, 96.3 KB of data (no-zstd !)
592 transferred 96.3 KB in * seconds (* */sec) (glob) (no-zstd !)
591 transferred 96.3 KB in * seconds (* */sec) (glob) (no-zstd !)
593 1027 files to transfer, 93.5 KB of data (zstd !)
592 1027 files to transfer, 93.5 KB of data (zstd !)
594 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
593 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
595 searching for changes
594 searching for changes
596 no changes found
595 no changes found
597 updating to branch default
596 updating to branch default
598 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
597 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
599 $ hg -R phase-no-publish phase -r 'all()'
598 $ hg -R phase-no-publish phase -r 'all()'
600 0: public
599 0: public
601 1: public
600 1: public
602 #endif
601 #endif
603 #if stream-bundle2
602 #if stream-bundle2
604 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
603 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
605 streaming all changes
604 streaming all changes
606 1034 files to transfer, 96.7 KB of data (no-zstd !)
605 1034 files to transfer, 96.7 KB of data (no-zstd !)
607 transferred 96.7 KB in * seconds (* */sec) (glob) (no-zstd !)
606 transferred 96.7 KB in * seconds (* */sec) (glob) (no-zstd !)
608 1034 files to transfer, 93.9 KB of data (zstd !)
607 1034 files to transfer, 93.9 KB of data (zstd !)
609 transferred 93.9 KB in * seconds (* */sec) (glob) (zstd !)
608 transferred 93.9 KB in * seconds (* */sec) (glob) (zstd !)
610 updating to branch default
609 updating to branch default
611 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
610 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
612 $ hg -R phase-no-publish phase -r 'all()'
611 $ hg -R phase-no-publish phase -r 'all()'
613 0: draft
612 0: draft
614 1: draft
613 1: draft
615 #endif
614 #endif
616
615
617 $ killdaemons.py
616 $ killdaemons.py
618
617
619 #if stream-legacy
618 #if stream-legacy
620
619
621 With v1 of the stream protocol, changesets are always cloned as public. There is
620 With v1 of the stream protocol, changesets are always cloned as public. There is
622 no exchange of obsolescence markers in stream v1.
621 no exchange of obsolescence markers in stream v1.
623
622
624 #endif
623 #endif
625 #if stream-bundle2
624 #if stream-bundle2
626
625
627 Stream repository with obsolescence
626 Stream repository with obsolescence
628 -----------------------------------
627 -----------------------------------
629
628
630 Clone non-publishing with obsolescence
629 Clone non-publishing with obsolescence
631
630
632 $ cat >> $HGRCPATH << EOF
631 $ cat >> $HGRCPATH << EOF
633 > [experimental]
632 > [experimental]
634 > evolution=all
633 > evolution=all
635 > EOF
634 > EOF
636
635
637 $ cd server
636 $ cd server
638 $ echo foo > foo
637 $ echo foo > foo
639 $ hg -q commit -m 'about to be pruned'
638 $ hg -q commit -m 'about to be pruned'
640 $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
639 $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
641 1 new obsolescence markers
640 1 new obsolescence markers
642 obsoleted 1 changesets
641 obsoleted 1 changesets
643 $ hg up null -q
642 $ hg up null -q
644 $ hg log -T '{rev}: {phase}\n'
643 $ hg log -T '{rev}: {phase}\n'
645 1: draft
644 1: draft
646 0: draft
645 0: draft
647 $ hg serve -p $HGPORT -d --pid-file=hg.pid
646 $ hg serve -p $HGPORT -d --pid-file=hg.pid
648 $ cat hg.pid > $DAEMON_PIDS
647 $ cat hg.pid > $DAEMON_PIDS
649 $ cd ..
648 $ cd ..
650
649
651 $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
650 $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
652 streaming all changes
651 streaming all changes
653 1035 files to transfer, 97.1 KB of data (no-zstd !)
652 1035 files to transfer, 97.1 KB of data (no-zstd !)
654 transferred 97.1 KB in * seconds (* */sec) (glob) (no-zstd !)
653 transferred 97.1 KB in * seconds (* */sec) (glob) (no-zstd !)
655 1035 files to transfer, 94.3 KB of data (zstd !)
654 1035 files to transfer, 94.3 KB of data (zstd !)
656 transferred 94.3 KB in * seconds (* */sec) (glob) (zstd !)
655 transferred 94.3 KB in * seconds (* */sec) (glob) (zstd !)
657 $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
656 $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
658 1: draft
657 1: draft
659 0: draft
658 0: draft
660 $ hg debugobsolete -R with-obsolescence
659 $ hg debugobsolete -R with-obsolescence
661 50382b884f66690b7045cac93a540cba4d4c906f 0 {c17445101a72edac06facd130d14808dfbd5c7c2} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
660 50382b884f66690b7045cac93a540cba4d4c906f 0 {c17445101a72edac06facd130d14808dfbd5c7c2} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
662
661
663 $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
662 $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
664 streaming all changes
663 streaming all changes
665 remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
664 remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
666 abort: pull failed on remote
665 abort: pull failed on remote
667 [100]
666 [100]
668
667
669 $ killdaemons.py
668 $ killdaemons.py
670
669
671 #endif
670 #endif
@@ -1,1326 +1,1324 b''
1 #testcases sshv1 sshv2
1 #testcases sshv1 sshv2
2
2
3 #if sshv2
3 #if sshv2
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [experimental]
5 > [experimental]
6 > sshpeer.advertise-v2 = true
6 > sshpeer.advertise-v2 = true
7 > sshserver.support-v2 = true
7 > sshserver.support-v2 = true
8 > EOF
8 > EOF
9 #endif
9 #endif
10
10
11 Prepare repo a:
11 Prepare repo a:
12
12
13 $ hg init a
13 $ hg init a
14 $ cd a
14 $ cd a
15 $ echo a > a
15 $ echo a > a
16 $ hg add a
16 $ hg add a
17 $ hg commit -m test
17 $ hg commit -m test
18 $ echo first line > b
18 $ echo first line > b
19 $ hg add b
19 $ hg add b
20
20
21 Create a non-inlined filelog:
21 Create a non-inlined filelog:
22
22
23 $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
23 $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
25 > cat data1 >> b
25 > cat data1 >> b
26 > hg commit -m test
26 > hg commit -m test
27 > done
27 > done
28
28
29 List files in store/data (should show a 'b.d'):
29 List files in store/data (should show a 'b.d'):
30
30
31 #if reporevlogstore
31 #if reporevlogstore
32 $ for i in .hg/store/data/*; do
32 $ for i in .hg/store/data/*; do
33 > echo $i
33 > echo $i
34 > done
34 > done
35 .hg/store/data/a.i
35 .hg/store/data/a.i
36 .hg/store/data/b.d
36 .hg/store/data/b.d
37 .hg/store/data/b.i
37 .hg/store/data/b.i
38 #endif
38 #endif
39
39
40 Trigger branchcache creation:
40 Trigger branchcache creation:
41
41
42 $ hg branches
42 $ hg branches
43 default 10:a7949464abda
43 default 10:a7949464abda
44 $ ls .hg/cache
44 $ ls .hg/cache
45 branch2-served
45 branch2-served
46 rbc-names-v1
46 rbc-names-v1
47 rbc-revs-v1
47 rbc-revs-v1
48
48
49 Default operation:
49 Default operation:
50
50
51 $ hg clone . ../b
51 $ hg clone . ../b
52 updating to branch default
52 updating to branch default
53 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
54 $ cd ../b
54 $ cd ../b
55
55
56 Ensure branchcache got copied over:
56 Ensure branchcache got copied over:
57
57
58 $ ls .hg/cache
58 $ ls .hg/cache
59 branch2-base
59 branch2-base
60 branch2-immutable
60 branch2-immutable
61 branch2-served
61 branch2-served
62 branch2-served.hidden
62 branch2-served.hidden
63 branch2-visible
63 branch2-visible
64 branch2-visible-hidden
64 branch2-visible-hidden
65 hgtagsfnodes1
66 rbc-names-v1
65 rbc-names-v1
67 rbc-revs-v1
66 rbc-revs-v1
68 tags2
67 tags2
69 tags2-served
68 tags2-served
70
69
71 $ cat a
70 $ cat a
72 a
71 a
73 $ hg verify
72 $ hg verify
74 checking changesets
73 checking changesets
75 checking manifests
74 checking manifests
76 crosschecking files in changesets and manifests
75 crosschecking files in changesets and manifests
77 checking files
76 checking files
78 checked 11 changesets with 11 changes to 2 files
77 checked 11 changesets with 11 changes to 2 files
79
78
80 Invalid dest '' must abort:
79 Invalid dest '' must abort:
81
80
82 $ hg clone . ''
81 $ hg clone . ''
83 abort: empty destination path is not valid
82 abort: empty destination path is not valid
84 [10]
83 [10]
85
84
86 No update, with debug option:
85 No update, with debug option:
87
86
88 #if hardlink
87 #if hardlink
89 $ hg --debug clone -U . ../c --config progress.debug=true
88 $ hg --debug clone -U . ../c --config progress.debug=true
90 linking: 1 files
89 linking: 1 files
91 linking: 2 files
90 linking: 2 files
92 linking: 3 files
91 linking: 3 files
93 linking: 4 files
92 linking: 4 files
94 linking: 5 files
93 linking: 5 files
95 linking: 6 files
94 linking: 6 files
96 linking: 7 files
95 linking: 7 files
97 linking: 8 files
96 linking: 8 files
98 linked 8 files (reporevlogstore !)
97 linked 8 files (reporevlogstore !)
99 linking: 9 files (reposimplestore !)
98 linking: 9 files (reposimplestore !)
100 linking: 10 files (reposimplestore !)
99 linking: 10 files (reposimplestore !)
101 linking: 11 files (reposimplestore !)
100 linking: 11 files (reposimplestore !)
102 linking: 12 files (reposimplestore !)
101 linking: 12 files (reposimplestore !)
103 linking: 13 files (reposimplestore !)
102 linking: 13 files (reposimplestore !)
104 linking: 14 files (reposimplestore !)
103 linking: 14 files (reposimplestore !)
105 linking: 15 files (reposimplestore !)
104 linking: 15 files (reposimplestore !)
106 linking: 16 files (reposimplestore !)
105 linking: 16 files (reposimplestore !)
107 linking: 17 files (reposimplestore !)
106 linking: 17 files (reposimplestore !)
108 linking: 18 files (reposimplestore !)
107 linking: 18 files (reposimplestore !)
109 linked 18 files (reposimplestore !)
108 linked 18 files (reposimplestore !)
110 updating the branch cache
109 updating the branch cache
111 #else
110 #else
112 $ hg --debug clone -U . ../c --config progress.debug=true
111 $ hg --debug clone -U . ../c --config progress.debug=true
113 linking: 1 files
112 linking: 1 files
114 copying: 2 files
113 copying: 2 files
115 copying: 3 files
114 copying: 3 files
116 copying: 4 files
115 copying: 4 files
117 copying: 5 files
116 copying: 5 files
118 copying: 6 files
117 copying: 6 files
119 copying: 7 files
118 copying: 7 files
120 copying: 8 files
119 copying: 8 files
121 copied 8 files (reporevlogstore !)
120 copied 8 files (reporevlogstore !)
122 copying: 9 files (reposimplestore !)
121 copying: 9 files (reposimplestore !)
123 copying: 10 files (reposimplestore !)
122 copying: 10 files (reposimplestore !)
124 copying: 11 files (reposimplestore !)
123 copying: 11 files (reposimplestore !)
125 copying: 12 files (reposimplestore !)
124 copying: 12 files (reposimplestore !)
126 copying: 13 files (reposimplestore !)
125 copying: 13 files (reposimplestore !)
127 copying: 14 files (reposimplestore !)
126 copying: 14 files (reposimplestore !)
128 copying: 15 files (reposimplestore !)
127 copying: 15 files (reposimplestore !)
129 copying: 16 files (reposimplestore !)
128 copying: 16 files (reposimplestore !)
130 copying: 17 files (reposimplestore !)
129 copying: 17 files (reposimplestore !)
131 copying: 18 files (reposimplestore !)
130 copying: 18 files (reposimplestore !)
132 copied 18 files (reposimplestore !)
131 copied 18 files (reposimplestore !)
133 #endif
132 #endif
134 $ cd ../c
133 $ cd ../c
135
134
136 Ensure branchcache got copied over:
135 Ensure branchcache got copied over:
137
136
138 $ ls .hg/cache
137 $ ls .hg/cache
139 branch2-base
138 branch2-base
140 branch2-immutable
139 branch2-immutable
141 branch2-served
140 branch2-served
142 branch2-served.hidden
141 branch2-served.hidden
143 branch2-visible
142 branch2-visible
144 branch2-visible-hidden
143 branch2-visible-hidden
145 hgtagsfnodes1
146 rbc-names-v1
144 rbc-names-v1
147 rbc-revs-v1
145 rbc-revs-v1
148 tags2
146 tags2
149 tags2-served
147 tags2-served
150
148
151 $ cat a 2>/dev/null || echo "a not present"
149 $ cat a 2>/dev/null || echo "a not present"
152 a not present
150 a not present
153 $ hg verify
151 $ hg verify
154 checking changesets
152 checking changesets
155 checking manifests
153 checking manifests
156 crosschecking files in changesets and manifests
154 crosschecking files in changesets and manifests
157 checking files
155 checking files
158 checked 11 changesets with 11 changes to 2 files
156 checked 11 changesets with 11 changes to 2 files
159
157
160 Default destination:
158 Default destination:
161
159
162 $ mkdir ../d
160 $ mkdir ../d
163 $ cd ../d
161 $ cd ../d
164 $ hg clone ../a
162 $ hg clone ../a
165 destination directory: a
163 destination directory: a
166 updating to branch default
164 updating to branch default
167 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
165 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
168 $ cd a
166 $ cd a
169 $ hg cat a
167 $ hg cat a
170 a
168 a
171 $ cd ../..
169 $ cd ../..
172
170
173 Check that we drop the 'file:' from the path before writing the .hgrc:
171 Check that we drop the 'file:' from the path before writing the .hgrc:
174
172
175 $ hg clone file:a e
173 $ hg clone file:a e
176 updating to branch default
174 updating to branch default
177 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
175 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
178 $ grep 'file:' e/.hg/hgrc
176 $ grep 'file:' e/.hg/hgrc
179 [1]
177 [1]
180
178
181 Check that path aliases are expanded:
179 Check that path aliases are expanded:
182
180
183 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
181 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
184 $ hg -R f showconfig paths.default
182 $ hg -R f showconfig paths.default
185 $TESTTMP/a#0
183 $TESTTMP/a#0
186
184
187 Use --pull:
185 Use --pull:
188
186
189 $ hg clone --pull a g
187 $ hg clone --pull a g
190 requesting all changes
188 requesting all changes
191 adding changesets
189 adding changesets
192 adding manifests
190 adding manifests
193 adding file changes
191 adding file changes
194 added 11 changesets with 11 changes to 2 files
192 added 11 changesets with 11 changes to 2 files
195 new changesets acb14030fe0a:a7949464abda
193 new changesets acb14030fe0a:a7949464abda
196 updating to branch default
194 updating to branch default
197 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
195 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
198 $ hg -R g verify
196 $ hg -R g verify
199 checking changesets
197 checking changesets
200 checking manifests
198 checking manifests
201 crosschecking files in changesets and manifests
199 crosschecking files in changesets and manifests
202 checking files
200 checking files
203 checked 11 changesets with 11 changes to 2 files
201 checked 11 changesets with 11 changes to 2 files
204
202
205 Invalid dest '' with --pull must abort (issue2528):
203 Invalid dest '' with --pull must abort (issue2528):
206
204
207 $ hg clone --pull a ''
205 $ hg clone --pull a ''
208 abort: empty destination path is not valid
206 abort: empty destination path is not valid
209 [10]
207 [10]
210
208
211 Clone to '.':
209 Clone to '.':
212
210
213 $ mkdir h
211 $ mkdir h
214 $ cd h
212 $ cd h
215 $ hg clone ../a .
213 $ hg clone ../a .
216 updating to branch default
214 updating to branch default
217 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
215 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
218 $ cd ..
216 $ cd ..
219
217
220
218
221 *** Tests for option -u ***
219 *** Tests for option -u ***
222
220
223 Adding some more history to repo a:
221 Adding some more history to repo a:
224
222
225 $ cd a
223 $ cd a
226 $ hg tag ref1
224 $ hg tag ref1
227 $ echo the quick brown fox >a
225 $ echo the quick brown fox >a
228 $ hg ci -m "hacked default"
226 $ hg ci -m "hacked default"
229 $ hg up ref1
227 $ hg up ref1
230 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
228 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
231 $ hg branch stable
229 $ hg branch stable
232 marked working directory as branch stable
230 marked working directory as branch stable
233 (branches are permanent and global, did you want a bookmark?)
231 (branches are permanent and global, did you want a bookmark?)
234 $ echo some text >a
232 $ echo some text >a
235 $ hg ci -m "starting branch stable"
233 $ hg ci -m "starting branch stable"
236 $ hg tag ref2
234 $ hg tag ref2
237 $ echo some more text >a
235 $ echo some more text >a
238 $ hg ci -m "another change for branch stable"
236 $ hg ci -m "another change for branch stable"
239 $ hg up ref2
237 $ hg up ref2
240 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
238 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
241 $ hg parents
239 $ hg parents
242 changeset: 13:e8ece76546a6
240 changeset: 13:e8ece76546a6
243 branch: stable
241 branch: stable
244 tag: ref2
242 tag: ref2
245 parent: 10:a7949464abda
243 parent: 10:a7949464abda
246 user: test
244 user: test
247 date: Thu Jan 01 00:00:00 1970 +0000
245 date: Thu Jan 01 00:00:00 1970 +0000
248 summary: starting branch stable
246 summary: starting branch stable
249
247
250
248
251 Repo a has two heads:
249 Repo a has two heads:
252
250
253 $ hg heads
251 $ hg heads
254 changeset: 15:0aae7cf88f0d
252 changeset: 15:0aae7cf88f0d
255 branch: stable
253 branch: stable
256 tag: tip
254 tag: tip
257 user: test
255 user: test
258 date: Thu Jan 01 00:00:00 1970 +0000
256 date: Thu Jan 01 00:00:00 1970 +0000
259 summary: another change for branch stable
257 summary: another change for branch stable
260
258
261 changeset: 12:f21241060d6a
259 changeset: 12:f21241060d6a
262 user: test
260 user: test
263 date: Thu Jan 01 00:00:00 1970 +0000
261 date: Thu Jan 01 00:00:00 1970 +0000
264 summary: hacked default
262 summary: hacked default
265
263
266
264
267 $ cd ..
265 $ cd ..
268
266
269
267
270 Testing --noupdate with --updaterev (must abort):
268 Testing --noupdate with --updaterev (must abort):
271
269
272 $ hg clone --noupdate --updaterev 1 a ua
270 $ hg clone --noupdate --updaterev 1 a ua
273 abort: cannot specify both --noupdate and --updaterev
271 abort: cannot specify both --noupdate and --updaterev
274 [10]
272 [10]
275
273
276
274
277 Testing clone -u:
275 Testing clone -u:
278
276
279 $ hg clone -u . a ua
277 $ hg clone -u . a ua
280 updating to branch stable
278 updating to branch stable
281 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
279 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
282
280
283 Repo ua has both heads:
281 Repo ua has both heads:
284
282
285 $ hg -R ua heads
283 $ hg -R ua heads
286 changeset: 15:0aae7cf88f0d
284 changeset: 15:0aae7cf88f0d
287 branch: stable
285 branch: stable
288 tag: tip
286 tag: tip
289 user: test
287 user: test
290 date: Thu Jan 01 00:00:00 1970 +0000
288 date: Thu Jan 01 00:00:00 1970 +0000
291 summary: another change for branch stable
289 summary: another change for branch stable
292
290
293 changeset: 12:f21241060d6a
291 changeset: 12:f21241060d6a
294 user: test
292 user: test
295 date: Thu Jan 01 00:00:00 1970 +0000
293 date: Thu Jan 01 00:00:00 1970 +0000
296 summary: hacked default
294 summary: hacked default
297
295
298
296
299 Same revision checked out in repo a and ua:
297 Same revision checked out in repo a and ua:
300
298
301 $ hg -R a parents --template "{node|short}\n"
299 $ hg -R a parents --template "{node|short}\n"
302 e8ece76546a6
300 e8ece76546a6
303 $ hg -R ua parents --template "{node|short}\n"
301 $ hg -R ua parents --template "{node|short}\n"
304 e8ece76546a6
302 e8ece76546a6
305
303
306 $ rm -r ua
304 $ rm -r ua
307
305
308
306
309 Testing clone --pull -u:
307 Testing clone --pull -u:
310
308
311 $ hg clone --pull -u . a ua
309 $ hg clone --pull -u . a ua
312 requesting all changes
310 requesting all changes
313 adding changesets
311 adding changesets
314 adding manifests
312 adding manifests
315 adding file changes
313 adding file changes
316 added 16 changesets with 16 changes to 3 files (+1 heads)
314 added 16 changesets with 16 changes to 3 files (+1 heads)
317 new changesets acb14030fe0a:0aae7cf88f0d
315 new changesets acb14030fe0a:0aae7cf88f0d
318 updating to branch stable
316 updating to branch stable
319 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
317 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
320
318
321 Repo ua has both heads:
319 Repo ua has both heads:
322
320
323 $ hg -R ua heads
321 $ hg -R ua heads
324 changeset: 15:0aae7cf88f0d
322 changeset: 15:0aae7cf88f0d
325 branch: stable
323 branch: stable
326 tag: tip
324 tag: tip
327 user: test
325 user: test
328 date: Thu Jan 01 00:00:00 1970 +0000
326 date: Thu Jan 01 00:00:00 1970 +0000
329 summary: another change for branch stable
327 summary: another change for branch stable
330
328
331 changeset: 12:f21241060d6a
329 changeset: 12:f21241060d6a
332 user: test
330 user: test
333 date: Thu Jan 01 00:00:00 1970 +0000
331 date: Thu Jan 01 00:00:00 1970 +0000
334 summary: hacked default
332 summary: hacked default
335
333
336
334
337 Same revision checked out in repo a and ua:
335 Same revision checked out in repo a and ua:
338
336
339 $ hg -R a parents --template "{node|short}\n"
337 $ hg -R a parents --template "{node|short}\n"
340 e8ece76546a6
338 e8ece76546a6
341 $ hg -R ua parents --template "{node|short}\n"
339 $ hg -R ua parents --template "{node|short}\n"
342 e8ece76546a6
340 e8ece76546a6
343
341
344 $ rm -r ua
342 $ rm -r ua
345
343
346
344
347 Testing clone -u <branch>:
345 Testing clone -u <branch>:
348
346
349 $ hg clone -u stable a ua
347 $ hg clone -u stable a ua
350 updating to branch stable
348 updating to branch stable
351 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
349 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
352
350
353 Repo ua has both heads:
351 Repo ua has both heads:
354
352
355 $ hg -R ua heads
353 $ hg -R ua heads
356 changeset: 15:0aae7cf88f0d
354 changeset: 15:0aae7cf88f0d
357 branch: stable
355 branch: stable
358 tag: tip
356 tag: tip
359 user: test
357 user: test
360 date: Thu Jan 01 00:00:00 1970 +0000
358 date: Thu Jan 01 00:00:00 1970 +0000
361 summary: another change for branch stable
359 summary: another change for branch stable
362
360
363 changeset: 12:f21241060d6a
361 changeset: 12:f21241060d6a
364 user: test
362 user: test
365 date: Thu Jan 01 00:00:00 1970 +0000
363 date: Thu Jan 01 00:00:00 1970 +0000
366 summary: hacked default
364 summary: hacked default
367
365
368
366
369 Branch 'stable' is checked out:
367 Branch 'stable' is checked out:
370
368
371 $ hg -R ua parents
369 $ hg -R ua parents
372 changeset: 15:0aae7cf88f0d
370 changeset: 15:0aae7cf88f0d
373 branch: stable
371 branch: stable
374 tag: tip
372 tag: tip
375 user: test
373 user: test
376 date: Thu Jan 01 00:00:00 1970 +0000
374 date: Thu Jan 01 00:00:00 1970 +0000
377 summary: another change for branch stable
375 summary: another change for branch stable
378
376
379
377
380 $ rm -r ua
378 $ rm -r ua
381
379
382
380
383 Testing default checkout:
381 Testing default checkout:
384
382
385 $ hg clone a ua
383 $ hg clone a ua
386 updating to branch default
384 updating to branch default
387 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
385 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
388
386
389 Repo ua has both heads:
387 Repo ua has both heads:
390
388
391 $ hg -R ua heads
389 $ hg -R ua heads
392 changeset: 15:0aae7cf88f0d
390 changeset: 15:0aae7cf88f0d
393 branch: stable
391 branch: stable
394 tag: tip
392 tag: tip
395 user: test
393 user: test
396 date: Thu Jan 01 00:00:00 1970 +0000
394 date: Thu Jan 01 00:00:00 1970 +0000
397 summary: another change for branch stable
395 summary: another change for branch stable
398
396
399 changeset: 12:f21241060d6a
397 changeset: 12:f21241060d6a
400 user: test
398 user: test
401 date: Thu Jan 01 00:00:00 1970 +0000
399 date: Thu Jan 01 00:00:00 1970 +0000
402 summary: hacked default
400 summary: hacked default
403
401
404
402
405 Branch 'default' is checked out:
403 Branch 'default' is checked out:
406
404
407 $ hg -R ua parents
405 $ hg -R ua parents
408 changeset: 12:f21241060d6a
406 changeset: 12:f21241060d6a
409 user: test
407 user: test
410 date: Thu Jan 01 00:00:00 1970 +0000
408 date: Thu Jan 01 00:00:00 1970 +0000
411 summary: hacked default
409 summary: hacked default
412
410
413 Test clone with a branch named "@" (issue3677)
411 Test clone with a branch named "@" (issue3677)
414
412
415 $ hg -R ua branch @
413 $ hg -R ua branch @
416 marked working directory as branch @
414 marked working directory as branch @
417 $ hg -R ua commit -m 'created branch @'
415 $ hg -R ua commit -m 'created branch @'
418 $ hg clone ua atbranch
416 $ hg clone ua atbranch
419 updating to branch default
417 updating to branch default
420 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
418 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
421 $ hg -R atbranch heads
419 $ hg -R atbranch heads
422 changeset: 16:798b6d97153e
420 changeset: 16:798b6d97153e
423 branch: @
421 branch: @
424 tag: tip
422 tag: tip
425 parent: 12:f21241060d6a
423 parent: 12:f21241060d6a
426 user: test
424 user: test
427 date: Thu Jan 01 00:00:00 1970 +0000
425 date: Thu Jan 01 00:00:00 1970 +0000
428 summary: created branch @
426 summary: created branch @
429
427
430 changeset: 15:0aae7cf88f0d
428 changeset: 15:0aae7cf88f0d
431 branch: stable
429 branch: stable
432 user: test
430 user: test
433 date: Thu Jan 01 00:00:00 1970 +0000
431 date: Thu Jan 01 00:00:00 1970 +0000
434 summary: another change for branch stable
432 summary: another change for branch stable
435
433
436 changeset: 12:f21241060d6a
434 changeset: 12:f21241060d6a
437 user: test
435 user: test
438 date: Thu Jan 01 00:00:00 1970 +0000
436 date: Thu Jan 01 00:00:00 1970 +0000
439 summary: hacked default
437 summary: hacked default
440
438
441 $ hg -R atbranch parents
439 $ hg -R atbranch parents
442 changeset: 12:f21241060d6a
440 changeset: 12:f21241060d6a
443 user: test
441 user: test
444 date: Thu Jan 01 00:00:00 1970 +0000
442 date: Thu Jan 01 00:00:00 1970 +0000
445 summary: hacked default
443 summary: hacked default
446
444
447
445
448 $ rm -r ua atbranch
446 $ rm -r ua atbranch
449
447
450
448
451 Testing #<branch>:
449 Testing #<branch>:
452
450
453 $ hg clone -u . a#stable ua
451 $ hg clone -u . a#stable ua
454 adding changesets
452 adding changesets
455 adding manifests
453 adding manifests
456 adding file changes
454 adding file changes
457 added 14 changesets with 14 changes to 3 files
455 added 14 changesets with 14 changes to 3 files
458 new changesets acb14030fe0a:0aae7cf88f0d
456 new changesets acb14030fe0a:0aae7cf88f0d
459 updating to branch stable
457 updating to branch stable
460 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
458 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
461
459
462 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
460 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
463
461
464 $ hg -R ua heads
462 $ hg -R ua heads
465 changeset: 13:0aae7cf88f0d
463 changeset: 13:0aae7cf88f0d
466 branch: stable
464 branch: stable
467 tag: tip
465 tag: tip
468 user: test
466 user: test
469 date: Thu Jan 01 00:00:00 1970 +0000
467 date: Thu Jan 01 00:00:00 1970 +0000
470 summary: another change for branch stable
468 summary: another change for branch stable
471
469
472 changeset: 10:a7949464abda
470 changeset: 10:a7949464abda
473 user: test
471 user: test
474 date: Thu Jan 01 00:00:00 1970 +0000
472 date: Thu Jan 01 00:00:00 1970 +0000
475 summary: test
473 summary: test
476
474
477
475
478 Same revision checked out in repo a and ua:
476 Same revision checked out in repo a and ua:
479
477
480 $ hg -R a parents --template "{node|short}\n"
478 $ hg -R a parents --template "{node|short}\n"
481 e8ece76546a6
479 e8ece76546a6
482 $ hg -R ua parents --template "{node|short}\n"
480 $ hg -R ua parents --template "{node|short}\n"
483 e8ece76546a6
481 e8ece76546a6
484
482
485 $ rm -r ua
483 $ rm -r ua
486
484
487
485
488 Testing -u -r <branch>:
486 Testing -u -r <branch>:
489
487
490 $ hg clone -u . -r stable a ua
488 $ hg clone -u . -r stable a ua
491 adding changesets
489 adding changesets
492 adding manifests
490 adding manifests
493 adding file changes
491 adding file changes
494 added 14 changesets with 14 changes to 3 files
492 added 14 changesets with 14 changes to 3 files
495 new changesets acb14030fe0a:0aae7cf88f0d
493 new changesets acb14030fe0a:0aae7cf88f0d
496 updating to branch stable
494 updating to branch stable
497 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
495 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
498
496
499 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
497 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
500
498
501 $ hg -R ua heads
499 $ hg -R ua heads
502 changeset: 13:0aae7cf88f0d
500 changeset: 13:0aae7cf88f0d
503 branch: stable
501 branch: stable
504 tag: tip
502 tag: tip
505 user: test
503 user: test
506 date: Thu Jan 01 00:00:00 1970 +0000
504 date: Thu Jan 01 00:00:00 1970 +0000
507 summary: another change for branch stable
505 summary: another change for branch stable
508
506
509 changeset: 10:a7949464abda
507 changeset: 10:a7949464abda
510 user: test
508 user: test
511 date: Thu Jan 01 00:00:00 1970 +0000
509 date: Thu Jan 01 00:00:00 1970 +0000
512 summary: test
510 summary: test
513
511
514
512
515 Same revision checked out in repo a and ua:
513 Same revision checked out in repo a and ua:
516
514
517 $ hg -R a parents --template "{node|short}\n"
515 $ hg -R a parents --template "{node|short}\n"
518 e8ece76546a6
516 e8ece76546a6
519 $ hg -R ua parents --template "{node|short}\n"
517 $ hg -R ua parents --template "{node|short}\n"
520 e8ece76546a6
518 e8ece76546a6
521
519
522 $ rm -r ua
520 $ rm -r ua
523
521
524
522
525 Testing -r <branch>:
523 Testing -r <branch>:
526
524
527 $ hg clone -r stable a ua
525 $ hg clone -r stable a ua
528 adding changesets
526 adding changesets
529 adding manifests
527 adding manifests
530 adding file changes
528 adding file changes
531 added 14 changesets with 14 changes to 3 files
529 added 14 changesets with 14 changes to 3 files
532 new changesets acb14030fe0a:0aae7cf88f0d
530 new changesets acb14030fe0a:0aae7cf88f0d
533 updating to branch stable
531 updating to branch stable
534 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
532 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
535
533
536 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
534 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
537
535
538 $ hg -R ua heads
536 $ hg -R ua heads
539 changeset: 13:0aae7cf88f0d
537 changeset: 13:0aae7cf88f0d
540 branch: stable
538 branch: stable
541 tag: tip
539 tag: tip
542 user: test
540 user: test
543 date: Thu Jan 01 00:00:00 1970 +0000
541 date: Thu Jan 01 00:00:00 1970 +0000
544 summary: another change for branch stable
542 summary: another change for branch stable
545
543
546 changeset: 10:a7949464abda
544 changeset: 10:a7949464abda
547 user: test
545 user: test
548 date: Thu Jan 01 00:00:00 1970 +0000
546 date: Thu Jan 01 00:00:00 1970 +0000
549 summary: test
547 summary: test
550
548
551
549
552 Branch 'stable' is checked out:
550 Branch 'stable' is checked out:
553
551
554 $ hg -R ua parents
552 $ hg -R ua parents
555 changeset: 13:0aae7cf88f0d
553 changeset: 13:0aae7cf88f0d
556 branch: stable
554 branch: stable
557 tag: tip
555 tag: tip
558 user: test
556 user: test
559 date: Thu Jan 01 00:00:00 1970 +0000
557 date: Thu Jan 01 00:00:00 1970 +0000
560 summary: another change for branch stable
558 summary: another change for branch stable
561
559
562
560
563 $ rm -r ua
561 $ rm -r ua
564
562
565
563
566 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
564 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
567 iterable in addbranchrevs()
565 iterable in addbranchrevs()
568
566
569 $ cat <<EOF > simpleclone.py
567 $ cat <<EOF > simpleclone.py
570 > from mercurial import hg, ui as uimod
568 > from mercurial import hg, ui as uimod
571 > myui = uimod.ui.load()
569 > myui = uimod.ui.load()
572 > repo = hg.repository(myui, b'a')
570 > repo = hg.repository(myui, b'a')
573 > hg.clone(myui, {}, repo, dest=b"ua")
571 > hg.clone(myui, {}, repo, dest=b"ua")
574 > EOF
572 > EOF
575
573
576 $ "$PYTHON" simpleclone.py
574 $ "$PYTHON" simpleclone.py
577 updating to branch default
575 updating to branch default
578 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
576 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
579
577
580 $ rm -r ua
578 $ rm -r ua
581
579
582 $ cat <<EOF > branchclone.py
580 $ cat <<EOF > branchclone.py
583 > from mercurial import extensions, hg, ui as uimod
581 > from mercurial import extensions, hg, ui as uimod
584 > myui = uimod.ui.load()
582 > myui = uimod.ui.load()
585 > extensions.loadall(myui)
583 > extensions.loadall(myui)
586 > extensions.populateui(myui)
584 > extensions.populateui(myui)
587 > repo = hg.repository(myui, b'a')
585 > repo = hg.repository(myui, b'a')
588 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable"])
586 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable"])
589 > EOF
587 > EOF
590
588
591 $ "$PYTHON" branchclone.py
589 $ "$PYTHON" branchclone.py
592 adding changesets
590 adding changesets
593 adding manifests
591 adding manifests
594 adding file changes
592 adding file changes
595 added 14 changesets with 14 changes to 3 files
593 added 14 changesets with 14 changes to 3 files
596 new changesets acb14030fe0a:0aae7cf88f0d
594 new changesets acb14030fe0a:0aae7cf88f0d
597 updating to branch stable
595 updating to branch stable
598 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
596 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
599 $ rm -r ua
597 $ rm -r ua
600
598
601
599
602 Test clone with special '@' bookmark:
600 Test clone with special '@' bookmark:
603 $ cd a
601 $ cd a
604 $ hg bookmark -r a7949464abda @ # branch point of stable from default
602 $ hg bookmark -r a7949464abda @ # branch point of stable from default
605 $ hg clone . ../i
603 $ hg clone . ../i
606 updating to bookmark @
604 updating to bookmark @
607 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
605 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
608 $ hg id -i ../i
606 $ hg id -i ../i
609 a7949464abda
607 a7949464abda
610 $ rm -r ../i
608 $ rm -r ../i
611
609
612 $ hg bookmark -f -r stable @
610 $ hg bookmark -f -r stable @
613 $ hg bookmarks
611 $ hg bookmarks
614 @ 15:0aae7cf88f0d
612 @ 15:0aae7cf88f0d
615 $ hg clone . ../i
613 $ hg clone . ../i
616 updating to bookmark @ on branch stable
614 updating to bookmark @ on branch stable
617 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
615 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
618 $ hg id -i ../i
616 $ hg id -i ../i
619 0aae7cf88f0d
617 0aae7cf88f0d
620 $ cd "$TESTTMP"
618 $ cd "$TESTTMP"
621
619
622
620
623 Testing failures:
621 Testing failures:
624
622
625 $ mkdir fail
623 $ mkdir fail
626 $ cd fail
624 $ cd fail
627
625
628 No local source
626 No local source
629
627
630 $ hg clone a b
628 $ hg clone a b
631 abort: repository a not found
629 abort: repository a not found
632 [255]
630 [255]
633
631
634 Invalid URL
632 Invalid URL
635
633
636 $ hg clone http://invalid:url/a b
634 $ hg clone http://invalid:url/a b
637 abort: error: nonnumeric port: 'url'
635 abort: error: nonnumeric port: 'url'
638 [100]
636 [100]
639
637
640 No remote source
638 No remote source
641
639
642 #if windows
640 #if windows
643 $ hg clone http://$LOCALIP:3121/a b
641 $ hg clone http://$LOCALIP:3121/a b
644 abort: error: * (glob)
642 abort: error: * (glob)
645 [100]
643 [100]
646 #else
644 #else
647 $ hg clone http://$LOCALIP:3121/a b
645 $ hg clone http://$LOCALIP:3121/a b
648 abort: error: *refused* (glob)
646 abort: error: *refused* (glob)
649 [100]
647 [100]
650 #endif
648 #endif
651 $ rm -rf b # work around bug with http clone
649 $ rm -rf b # work around bug with http clone
652
650
653
651
654 #if unix-permissions no-root
652 #if unix-permissions no-root
655
653
656 Inaccessible source
654 Inaccessible source
657
655
658 $ mkdir a
656 $ mkdir a
659 $ chmod 000 a
657 $ chmod 000 a
660 $ hg clone a b
658 $ hg clone a b
661 abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
659 abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
662 [255]
660 [255]
663
661
664 Inaccessible destination
662 Inaccessible destination
665
663
666 $ hg init b
664 $ hg init b
667 $ cd b
665 $ cd b
668 $ hg clone . ../a
666 $ hg clone . ../a
669 abort: Permission denied: *../a* (glob)
667 abort: Permission denied: *../a* (glob)
670 [255]
668 [255]
671 $ cd ..
669 $ cd ..
672 $ chmod 700 a
670 $ chmod 700 a
673 $ rm -r a b
671 $ rm -r a b
674
672
675 #endif
673 #endif
676
674
677
675
678 #if fifo
676 #if fifo
679
677
680 Source of wrong type
678 Source of wrong type
681
679
682 $ mkfifo a
680 $ mkfifo a
683 $ hg clone a b
681 $ hg clone a b
684 abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
682 abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
685 [255]
683 [255]
686 $ rm a
684 $ rm a
687
685
688 #endif
686 #endif
689
687
690 Default destination, same directory
688 Default destination, same directory
691
689
692 $ hg init q
690 $ hg init q
693 $ hg clone q
691 $ hg clone q
694 destination directory: q
692 destination directory: q
695 abort: destination 'q' is not empty
693 abort: destination 'q' is not empty
696 [10]
694 [10]
697
695
698 destination directory not empty
696 destination directory not empty
699
697
700 $ mkdir a
698 $ mkdir a
701 $ echo stuff > a/a
699 $ echo stuff > a/a
702 $ hg clone q a
700 $ hg clone q a
703 abort: destination 'a' is not empty
701 abort: destination 'a' is not empty
704 [10]
702 [10]
705
703
706
704
707 #if unix-permissions no-root
705 #if unix-permissions no-root
708
706
709 leave existing directory in place after clone failure
707 leave existing directory in place after clone failure
710
708
711 $ hg init c
709 $ hg init c
712 $ cd c
710 $ cd c
713 $ echo c > c
711 $ echo c > c
714 $ hg commit -A -m test
712 $ hg commit -A -m test
715 adding c
713 adding c
716 $ chmod -rx .hg/store/data
714 $ chmod -rx .hg/store/data
717 $ cd ..
715 $ cd ..
718 $ mkdir d
716 $ mkdir d
719 $ hg clone c d 2> err
717 $ hg clone c d 2> err
720 [255]
718 [255]
721 $ test -d d
719 $ test -d d
722 $ test -d d/.hg
720 $ test -d d/.hg
723 [1]
721 [1]
724
722
725 re-enable perm to allow deletion
723 re-enable perm to allow deletion
726
724
727 $ chmod +rx c/.hg/store/data
725 $ chmod +rx c/.hg/store/data
728
726
729 #endif
727 #endif
730
728
731 $ cd ..
729 $ cd ..
732
730
733 Test clone from the repository in (emulated) revlog format 0 (issue4203):
731 Test clone from the repository in (emulated) revlog format 0 (issue4203):
734
732
735 $ mkdir issue4203
733 $ mkdir issue4203
736 $ mkdir -p src/.hg
734 $ mkdir -p src/.hg
737 $ echo foo > src/foo
735 $ echo foo > src/foo
738 $ hg -R src add src/foo
736 $ hg -R src add src/foo
739 $ hg -R src commit -m '#0'
737 $ hg -R src commit -m '#0'
740 $ hg -R src log -q
738 $ hg -R src log -q
741 0:e1bab28bca43
739 0:e1bab28bca43
742 $ hg -R src debugrevlog -c | egrep 'format|flags'
740 $ hg -R src debugrevlog -c | egrep 'format|flags'
743 format : 0
741 format : 0
744 flags : (none)
742 flags : (none)
745 $ hg root -R src -T json | sed 's|\\\\|\\|g'
743 $ hg root -R src -T json | sed 's|\\\\|\\|g'
746 [
744 [
747 {
745 {
748 "hgpath": "$TESTTMP/src/.hg",
746 "hgpath": "$TESTTMP/src/.hg",
749 "reporoot": "$TESTTMP/src",
747 "reporoot": "$TESTTMP/src",
750 "storepath": "$TESTTMP/src/.hg"
748 "storepath": "$TESTTMP/src/.hg"
751 }
749 }
752 ]
750 ]
753 $ hg clone -U -q src dst
751 $ hg clone -U -q src dst
754 $ hg -R dst log -q
752 $ hg -R dst log -q
755 0:e1bab28bca43
753 0:e1bab28bca43
756
754
757 Create repositories to test auto sharing functionality
755 Create repositories to test auto sharing functionality
758
756
759 $ cat >> $HGRCPATH << EOF
757 $ cat >> $HGRCPATH << EOF
760 > [extensions]
758 > [extensions]
761 > share=
759 > share=
762 > EOF
760 > EOF
763
761
764 $ hg init empty
762 $ hg init empty
765 $ hg init source1a
763 $ hg init source1a
766 $ cd source1a
764 $ cd source1a
767 $ echo initial1 > foo
765 $ echo initial1 > foo
768 $ hg -q commit -A -m initial
766 $ hg -q commit -A -m initial
769 $ echo second > foo
767 $ echo second > foo
770 $ hg commit -m second
768 $ hg commit -m second
771 $ cd ..
769 $ cd ..
772
770
773 $ hg init filteredrev0
771 $ hg init filteredrev0
774 $ cd filteredrev0
772 $ cd filteredrev0
775 $ cat >> .hg/hgrc << EOF
773 $ cat >> .hg/hgrc << EOF
776 > [experimental]
774 > [experimental]
777 > evolution.createmarkers=True
775 > evolution.createmarkers=True
778 > EOF
776 > EOF
779 $ echo initial1 > foo
777 $ echo initial1 > foo
780 $ hg -q commit -A -m initial0
778 $ hg -q commit -A -m initial0
781 $ hg -q up -r null
779 $ hg -q up -r null
782 $ echo initial2 > foo
780 $ echo initial2 > foo
783 $ hg -q commit -A -m initial1
781 $ hg -q commit -A -m initial1
784 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
782 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
785 1 new obsolescence markers
783 1 new obsolescence markers
786 obsoleted 1 changesets
784 obsoleted 1 changesets
787 $ cd ..
785 $ cd ..
788
786
789 $ hg -q clone --pull source1a source1b
787 $ hg -q clone --pull source1a source1b
790 $ cd source1a
788 $ cd source1a
791 $ hg bookmark bookA
789 $ hg bookmark bookA
792 $ echo 1a > foo
790 $ echo 1a > foo
793 $ hg commit -m 1a
791 $ hg commit -m 1a
794 $ cd ../source1b
792 $ cd ../source1b
795 $ hg -q up -r 0
793 $ hg -q up -r 0
796 $ echo head1 > foo
794 $ echo head1 > foo
797 $ hg commit -m head1
795 $ hg commit -m head1
798 created new head
796 created new head
799 $ hg bookmark head1
797 $ hg bookmark head1
800 $ hg -q up -r 0
798 $ hg -q up -r 0
801 $ echo head2 > foo
799 $ echo head2 > foo
802 $ hg commit -m head2
800 $ hg commit -m head2
803 created new head
801 created new head
804 $ hg bookmark head2
802 $ hg bookmark head2
805 $ hg -q up -r 0
803 $ hg -q up -r 0
806 $ hg branch branch1
804 $ hg branch branch1
807 marked working directory as branch branch1
805 marked working directory as branch branch1
808 (branches are permanent and global, did you want a bookmark?)
806 (branches are permanent and global, did you want a bookmark?)
809 $ echo branch1 > foo
807 $ echo branch1 > foo
810 $ hg commit -m branch1
808 $ hg commit -m branch1
811 $ hg -q up -r 0
809 $ hg -q up -r 0
812 $ hg branch branch2
810 $ hg branch branch2
813 marked working directory as branch branch2
811 marked working directory as branch branch2
814 $ echo branch2 > foo
812 $ echo branch2 > foo
815 $ hg commit -m branch2
813 $ hg commit -m branch2
816 $ cd ..
814 $ cd ..
817 $ hg init source2
815 $ hg init source2
818 $ cd source2
816 $ cd source2
819 $ echo initial2 > foo
817 $ echo initial2 > foo
820 $ hg -q commit -A -m initial2
818 $ hg -q commit -A -m initial2
821 $ echo second > foo
819 $ echo second > foo
822 $ hg commit -m second
820 $ hg commit -m second
823 $ cd ..
821 $ cd ..
824
822
825 Clone with auto share from an empty repo should not result in share
823 Clone with auto share from an empty repo should not result in share
826
824
827 $ mkdir share
825 $ mkdir share
828 $ hg --config share.pool=share clone empty share-empty
826 $ hg --config share.pool=share clone empty share-empty
829 (not using pooled storage: remote appears to be empty)
827 (not using pooled storage: remote appears to be empty)
830 updating to branch default
828 updating to branch default
831 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
829 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
832 $ ls share
830 $ ls share
833 $ test -d share-empty/.hg/store
831 $ test -d share-empty/.hg/store
834 $ test -f share-empty/.hg/sharedpath
832 $ test -f share-empty/.hg/sharedpath
835 [1]
833 [1]
836
834
837 Clone with auto share from a repo with filtered revision 0 should not result in share
835 Clone with auto share from a repo with filtered revision 0 should not result in share
838
836
839 $ hg --config share.pool=share clone filteredrev0 share-filtered
837 $ hg --config share.pool=share clone filteredrev0 share-filtered
840 (not using pooled storage: unable to resolve identity of remote)
838 (not using pooled storage: unable to resolve identity of remote)
841 requesting all changes
839 requesting all changes
842 adding changesets
840 adding changesets
843 adding manifests
841 adding manifests
844 adding file changes
842 adding file changes
845 added 1 changesets with 1 changes to 1 files
843 added 1 changesets with 1 changes to 1 files
846 new changesets e082c1832e09
844 new changesets e082c1832e09
847 updating to branch default
845 updating to branch default
848 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
846 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
849
847
850 Clone from repo with content should result in shared store being created
848 Clone from repo with content should result in shared store being created
851
849
852 $ hg --config share.pool=share clone source1a share-dest1a
850 $ hg --config share.pool=share clone source1a share-dest1a
853 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
851 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
854 requesting all changes
852 requesting all changes
855 adding changesets
853 adding changesets
856 adding manifests
854 adding manifests
857 adding file changes
855 adding file changes
858 added 3 changesets with 3 changes to 1 files
856 added 3 changesets with 3 changes to 1 files
859 new changesets b5f04eac9d8f:e5bfe23c0b47
857 new changesets b5f04eac9d8f:e5bfe23c0b47
860 searching for changes
858 searching for changes
861 no changes found
859 no changes found
862 adding remote bookmark bookA
860 adding remote bookmark bookA
863 updating working directory
861 updating working directory
864 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
862 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
865
863
866 The shared repo should have been created
864 The shared repo should have been created
867
865
868 $ ls share
866 $ ls share
869 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
867 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
870
868
871 The destination should point to it
869 The destination should point to it
872
870
873 $ cat share-dest1a/.hg/sharedpath; echo
871 $ cat share-dest1a/.hg/sharedpath; echo
874 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
872 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
875
873
876 The destination should have bookmarks
874 The destination should have bookmarks
877
875
878 $ hg -R share-dest1a bookmarks
876 $ hg -R share-dest1a bookmarks
879 bookA 2:e5bfe23c0b47
877 bookA 2:e5bfe23c0b47
880
878
881 The default path should be the remote, not the share
879 The default path should be the remote, not the share
882
880
883 $ hg -R share-dest1a config paths.default
881 $ hg -R share-dest1a config paths.default
884 $TESTTMP/source1a
882 $TESTTMP/source1a
885
883
886 Clone with existing share dir should result in pull + share
884 Clone with existing share dir should result in pull + share
887
885
888 $ hg --config share.pool=share clone source1b share-dest1b
886 $ hg --config share.pool=share clone source1b share-dest1b
889 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
887 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
890 searching for changes
888 searching for changes
891 adding changesets
889 adding changesets
892 adding manifests
890 adding manifests
893 adding file changes
891 adding file changes
894 adding remote bookmark head1
892 adding remote bookmark head1
895 adding remote bookmark head2
893 adding remote bookmark head2
896 added 4 changesets with 4 changes to 1 files (+4 heads)
894 added 4 changesets with 4 changes to 1 files (+4 heads)
897 new changesets 4a8dc1ab4c13:6bacf4683960
895 new changesets 4a8dc1ab4c13:6bacf4683960
898 updating working directory
896 updating working directory
899 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
897 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
900
898
901 $ ls share
899 $ ls share
902 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
900 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
903
901
904 $ cat share-dest1b/.hg/sharedpath; echo
902 $ cat share-dest1b/.hg/sharedpath; echo
905 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
903 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
906
904
907 We only get bookmarks from the remote, not everything in the share
905 We only get bookmarks from the remote, not everything in the share
908
906
909 $ hg -R share-dest1b bookmarks
907 $ hg -R share-dest1b bookmarks
910 head1 3:4a8dc1ab4c13
908 head1 3:4a8dc1ab4c13
911 head2 4:99f71071f117
909 head2 4:99f71071f117
912
910
913 Default path should be source, not share.
911 Default path should be source, not share.
914
912
915 $ hg -R share-dest1b config paths.default
913 $ hg -R share-dest1b config paths.default
916 $TESTTMP/source1b
914 $TESTTMP/source1b
917
915
918 Checked out revision should be head of default branch
916 Checked out revision should be head of default branch
919
917
920 $ hg -R share-dest1b log -r .
918 $ hg -R share-dest1b log -r .
921 changeset: 4:99f71071f117
919 changeset: 4:99f71071f117
922 bookmark: head2
920 bookmark: head2
923 parent: 0:b5f04eac9d8f
921 parent: 0:b5f04eac9d8f
924 user: test
922 user: test
925 date: Thu Jan 01 00:00:00 1970 +0000
923 date: Thu Jan 01 00:00:00 1970 +0000
926 summary: head2
924 summary: head2
927
925
928
926
929 Clone from unrelated repo should result in new share
927 Clone from unrelated repo should result in new share
930
928
931 $ hg --config share.pool=share clone source2 share-dest2
929 $ hg --config share.pool=share clone source2 share-dest2
932 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
930 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
933 requesting all changes
931 requesting all changes
934 adding changesets
932 adding changesets
935 adding manifests
933 adding manifests
936 adding file changes
934 adding file changes
937 added 2 changesets with 2 changes to 1 files
935 added 2 changesets with 2 changes to 1 files
938 new changesets 22aeff664783:63cf6c3dba4a
936 new changesets 22aeff664783:63cf6c3dba4a
939 searching for changes
937 searching for changes
940 no changes found
938 no changes found
941 updating working directory
939 updating working directory
942 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
940 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
943
941
944 $ ls share
942 $ ls share
945 22aeff664783fd44c6d9b435618173c118c3448e
943 22aeff664783fd44c6d9b435618173c118c3448e
946 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
944 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
947
945
948 remote naming mode works as advertised
946 remote naming mode works as advertised
949
947
950 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
948 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
951 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
949 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
952 requesting all changes
950 requesting all changes
953 adding changesets
951 adding changesets
954 adding manifests
952 adding manifests
955 adding file changes
953 adding file changes
956 added 3 changesets with 3 changes to 1 files
954 added 3 changesets with 3 changes to 1 files
957 new changesets b5f04eac9d8f:e5bfe23c0b47
955 new changesets b5f04eac9d8f:e5bfe23c0b47
958 searching for changes
956 searching for changes
959 no changes found
957 no changes found
960 adding remote bookmark bookA
958 adding remote bookmark bookA
961 updating working directory
959 updating working directory
962 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
960 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
963
961
964 $ ls shareremote
962 $ ls shareremote
965 195bb1fcdb595c14a6c13e0269129ed78f6debde
963 195bb1fcdb595c14a6c13e0269129ed78f6debde
966
964
967 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
965 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
968 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
966 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
969 requesting all changes
967 requesting all changes
970 adding changesets
968 adding changesets
971 adding manifests
969 adding manifests
972 adding file changes
970 adding file changes
973 added 6 changesets with 6 changes to 1 files (+4 heads)
971 added 6 changesets with 6 changes to 1 files (+4 heads)
974 new changesets b5f04eac9d8f:6bacf4683960
972 new changesets b5f04eac9d8f:6bacf4683960
975 searching for changes
973 searching for changes
976 no changes found
974 no changes found
977 adding remote bookmark head1
975 adding remote bookmark head1
978 adding remote bookmark head2
976 adding remote bookmark head2
979 updating working directory
977 updating working directory
980 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
978 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
981
979
982 $ ls shareremote
980 $ ls shareremote
983 195bb1fcdb595c14a6c13e0269129ed78f6debde
981 195bb1fcdb595c14a6c13e0269129ed78f6debde
984 c0d4f83847ca2a873741feb7048a45085fd47c46
982 c0d4f83847ca2a873741feb7048a45085fd47c46
985
983
986 request to clone a single revision is respected in sharing mode
984 request to clone a single revision is respected in sharing mode
987
985
988 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
986 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
989 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
987 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
990 adding changesets
988 adding changesets
991 adding manifests
989 adding manifests
992 adding file changes
990 adding file changes
993 added 2 changesets with 2 changes to 1 files
991 added 2 changesets with 2 changes to 1 files
994 new changesets b5f04eac9d8f:4a8dc1ab4c13
992 new changesets b5f04eac9d8f:4a8dc1ab4c13
995 no changes found
993 no changes found
996 adding remote bookmark head1
994 adding remote bookmark head1
997 updating working directory
995 updating working directory
998 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
996 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
999
997
1000 $ hg -R share-1arev log -G
998 $ hg -R share-1arev log -G
1001 @ changeset: 1:4a8dc1ab4c13
999 @ changeset: 1:4a8dc1ab4c13
1002 | bookmark: head1
1000 | bookmark: head1
1003 | tag: tip
1001 | tag: tip
1004 | user: test
1002 | user: test
1005 | date: Thu Jan 01 00:00:00 1970 +0000
1003 | date: Thu Jan 01 00:00:00 1970 +0000
1006 | summary: head1
1004 | summary: head1
1007 |
1005 |
1008 o changeset: 0:b5f04eac9d8f
1006 o changeset: 0:b5f04eac9d8f
1009 user: test
1007 user: test
1010 date: Thu Jan 01 00:00:00 1970 +0000
1008 date: Thu Jan 01 00:00:00 1970 +0000
1011 summary: initial
1009 summary: initial
1012
1010
1013
1011
1014 making another clone should only pull down requested rev
1012 making another clone should only pull down requested rev
1015
1013
1016 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
1014 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
1017 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1015 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1018 searching for changes
1016 searching for changes
1019 adding changesets
1017 adding changesets
1020 adding manifests
1018 adding manifests
1021 adding file changes
1019 adding file changes
1022 adding remote bookmark head1
1020 adding remote bookmark head1
1023 adding remote bookmark head2
1021 adding remote bookmark head2
1024 added 1 changesets with 1 changes to 1 files (+1 heads)
1022 added 1 changesets with 1 changes to 1 files (+1 heads)
1025 new changesets 99f71071f117
1023 new changesets 99f71071f117
1026 updating working directory
1024 updating working directory
1027 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1025 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1028
1026
1029 $ hg -R share-1brev log -G
1027 $ hg -R share-1brev log -G
1030 @ changeset: 2:99f71071f117
1028 @ changeset: 2:99f71071f117
1031 | bookmark: head2
1029 | bookmark: head2
1032 | tag: tip
1030 | tag: tip
1033 | parent: 0:b5f04eac9d8f
1031 | parent: 0:b5f04eac9d8f
1034 | user: test
1032 | user: test
1035 | date: Thu Jan 01 00:00:00 1970 +0000
1033 | date: Thu Jan 01 00:00:00 1970 +0000
1036 | summary: head2
1034 | summary: head2
1037 |
1035 |
1038 | o changeset: 1:4a8dc1ab4c13
1036 | o changeset: 1:4a8dc1ab4c13
1039 |/ bookmark: head1
1037 |/ bookmark: head1
1040 | user: test
1038 | user: test
1041 | date: Thu Jan 01 00:00:00 1970 +0000
1039 | date: Thu Jan 01 00:00:00 1970 +0000
1042 | summary: head1
1040 | summary: head1
1043 |
1041 |
1044 o changeset: 0:b5f04eac9d8f
1042 o changeset: 0:b5f04eac9d8f
1045 user: test
1043 user: test
1046 date: Thu Jan 01 00:00:00 1970 +0000
1044 date: Thu Jan 01 00:00:00 1970 +0000
1047 summary: initial
1045 summary: initial
1048
1046
1049
1047
1050 Request to clone a single branch is respected in sharing mode
1048 Request to clone a single branch is respected in sharing mode
1051
1049
1052 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1050 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1053 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1051 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1054 adding changesets
1052 adding changesets
1055 adding manifests
1053 adding manifests
1056 adding file changes
1054 adding file changes
1057 added 2 changesets with 2 changes to 1 files
1055 added 2 changesets with 2 changes to 1 files
1058 new changesets b5f04eac9d8f:5f92a6c1a1b1
1056 new changesets b5f04eac9d8f:5f92a6c1a1b1
1059 no changes found
1057 no changes found
1060 updating working directory
1058 updating working directory
1061 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1059 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1062
1060
1063 $ hg -R share-1bbranch1 log -G
1061 $ hg -R share-1bbranch1 log -G
1064 o changeset: 1:5f92a6c1a1b1
1062 o changeset: 1:5f92a6c1a1b1
1065 | branch: branch1
1063 | branch: branch1
1066 | tag: tip
1064 | tag: tip
1067 | user: test
1065 | user: test
1068 | date: Thu Jan 01 00:00:00 1970 +0000
1066 | date: Thu Jan 01 00:00:00 1970 +0000
1069 | summary: branch1
1067 | summary: branch1
1070 |
1068 |
1071 @ changeset: 0:b5f04eac9d8f
1069 @ changeset: 0:b5f04eac9d8f
1072 user: test
1070 user: test
1073 date: Thu Jan 01 00:00:00 1970 +0000
1071 date: Thu Jan 01 00:00:00 1970 +0000
1074 summary: initial
1072 summary: initial
1075
1073
1076
1074
1077 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1075 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1078 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1076 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1079 searching for changes
1077 searching for changes
1080 adding changesets
1078 adding changesets
1081 adding manifests
1079 adding manifests
1082 adding file changes
1080 adding file changes
1083 added 1 changesets with 1 changes to 1 files (+1 heads)
1081 added 1 changesets with 1 changes to 1 files (+1 heads)
1084 new changesets 6bacf4683960
1082 new changesets 6bacf4683960
1085 updating working directory
1083 updating working directory
1086 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1084 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1087
1085
1088 $ hg -R share-1bbranch2 log -G
1086 $ hg -R share-1bbranch2 log -G
1089 o changeset: 2:6bacf4683960
1087 o changeset: 2:6bacf4683960
1090 | branch: branch2
1088 | branch: branch2
1091 | tag: tip
1089 | tag: tip
1092 | parent: 0:b5f04eac9d8f
1090 | parent: 0:b5f04eac9d8f
1093 | user: test
1091 | user: test
1094 | date: Thu Jan 01 00:00:00 1970 +0000
1092 | date: Thu Jan 01 00:00:00 1970 +0000
1095 | summary: branch2
1093 | summary: branch2
1096 |
1094 |
1097 | o changeset: 1:5f92a6c1a1b1
1095 | o changeset: 1:5f92a6c1a1b1
1098 |/ branch: branch1
1096 |/ branch: branch1
1099 | user: test
1097 | user: test
1100 | date: Thu Jan 01 00:00:00 1970 +0000
1098 | date: Thu Jan 01 00:00:00 1970 +0000
1101 | summary: branch1
1099 | summary: branch1
1102 |
1100 |
1103 @ changeset: 0:b5f04eac9d8f
1101 @ changeset: 0:b5f04eac9d8f
1104 user: test
1102 user: test
1105 date: Thu Jan 01 00:00:00 1970 +0000
1103 date: Thu Jan 01 00:00:00 1970 +0000
1106 summary: initial
1104 summary: initial
1107
1105
1108
1106
1109 -U is respected in share clone mode
1107 -U is respected in share clone mode
1110
1108
1111 $ hg --config share.pool=share clone -U source1a share-1anowc
1109 $ hg --config share.pool=share clone -U source1a share-1anowc
1112 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1110 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1113 searching for changes
1111 searching for changes
1114 no changes found
1112 no changes found
1115 adding remote bookmark bookA
1113 adding remote bookmark bookA
1116
1114
1117 $ ls -A share-1anowc
1115 $ ls -A share-1anowc
1118 .hg
1116 .hg
1119
1117
1120 Test that auto sharing doesn't cause failure of "hg clone local remote"
1118 Test that auto sharing doesn't cause failure of "hg clone local remote"
1121
1119
1122 $ cd $TESTTMP
1120 $ cd $TESTTMP
1123 $ hg -R a id -r 0
1121 $ hg -R a id -r 0
1124 acb14030fe0a
1122 acb14030fe0a
1125 $ hg id -R remote -r 0
1123 $ hg id -R remote -r 0
1126 abort: repository remote not found
1124 abort: repository remote not found
1127 [255]
1125 [255]
1128 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1126 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1129 $ hg -R remote id -r 0
1127 $ hg -R remote id -r 0
1130 acb14030fe0a
1128 acb14030fe0a
1131
1129
1132 Cloning into pooled storage doesn't race (issue5104)
1130 Cloning into pooled storage doesn't race (issue5104)
1133
1131
1134 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1132 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1135 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1133 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1136 $ wait
1134 $ wait
1137
1135
1138 $ hg -R share-destrace1 log -r tip
1136 $ hg -R share-destrace1 log -r tip
1139 changeset: 2:e5bfe23c0b47
1137 changeset: 2:e5bfe23c0b47
1140 bookmark: bookA
1138 bookmark: bookA
1141 tag: tip
1139 tag: tip
1142 user: test
1140 user: test
1143 date: Thu Jan 01 00:00:00 1970 +0000
1141 date: Thu Jan 01 00:00:00 1970 +0000
1144 summary: 1a
1142 summary: 1a
1145
1143
1146
1144
1147 $ hg -R share-destrace2 log -r tip
1145 $ hg -R share-destrace2 log -r tip
1148 changeset: 2:e5bfe23c0b47
1146 changeset: 2:e5bfe23c0b47
1149 bookmark: bookA
1147 bookmark: bookA
1150 tag: tip
1148 tag: tip
1151 user: test
1149 user: test
1152 date: Thu Jan 01 00:00:00 1970 +0000
1150 date: Thu Jan 01 00:00:00 1970 +0000
1153 summary: 1a
1151 summary: 1a
1154
1152
1155 One repo should be new, the other should be shared from the pool. We
1153 One repo should be new, the other should be shared from the pool. We
1156 don't care which is which, so we just make sure we always print the
1154 don't care which is which, so we just make sure we always print the
1157 one containing "new pooled" first, then one one containing "existing
1155 one containing "new pooled" first, then one one containing "existing
1158 pooled".
1156 pooled".
1159
1157
1160 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1158 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1161 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1159 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1162 requesting all changes
1160 requesting all changes
1163 adding changesets
1161 adding changesets
1164 adding manifests
1162 adding manifests
1165 adding file changes
1163 adding file changes
1166 added 3 changesets with 3 changes to 1 files
1164 added 3 changesets with 3 changes to 1 files
1167 new changesets b5f04eac9d8f:e5bfe23c0b47
1165 new changesets b5f04eac9d8f:e5bfe23c0b47
1168 searching for changes
1166 searching for changes
1169 no changes found
1167 no changes found
1170 adding remote bookmark bookA
1168 adding remote bookmark bookA
1171 updating working directory
1169 updating working directory
1172 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1173
1171
1174 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1172 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1175 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1173 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1176 searching for changes
1174 searching for changes
1177 no changes found
1175 no changes found
1178 adding remote bookmark bookA
1176 adding remote bookmark bookA
1179 updating working directory
1177 updating working directory
1180 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1178 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1181
1179
1182 SEC: check for unsafe ssh url
1180 SEC: check for unsafe ssh url
1183
1181
1184 $ cat >> $HGRCPATH << EOF
1182 $ cat >> $HGRCPATH << EOF
1185 > [ui]
1183 > [ui]
1186 > ssh = sh -c "read l; read l; read l"
1184 > ssh = sh -c "read l; read l; read l"
1187 > EOF
1185 > EOF
1188
1186
1189 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1187 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1190 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1188 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1191 [255]
1189 [255]
1192 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1190 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1193 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1191 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1194 [255]
1192 [255]
1195 $ hg clone 'ssh://fakehost|touch%20owned/path'
1193 $ hg clone 'ssh://fakehost|touch%20owned/path'
1196 abort: no suitable response from remote hg
1194 abort: no suitable response from remote hg
1197 [255]
1195 [255]
1198 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1196 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1199 abort: no suitable response from remote hg
1197 abort: no suitable response from remote hg
1200 [255]
1198 [255]
1201
1199
1202 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1200 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1203 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1201 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1204 [255]
1202 [255]
1205
1203
1206 #if windows
1204 #if windows
1207 $ hg clone "ssh://%26touch%20owned%20/" --debug
1205 $ hg clone "ssh://%26touch%20owned%20/" --debug
1208 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1206 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1209 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1207 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1210 sending hello command
1208 sending hello command
1211 sending between command
1209 sending between command
1212 abort: no suitable response from remote hg
1210 abort: no suitable response from remote hg
1213 [255]
1211 [255]
1214 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1212 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1215 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1213 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1216 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1214 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1217 sending hello command
1215 sending hello command
1218 sending between command
1216 sending between command
1219 abort: no suitable response from remote hg
1217 abort: no suitable response from remote hg
1220 [255]
1218 [255]
1221 #else
1219 #else
1222 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1220 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1223 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1221 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1224 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1222 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1225 sending hello command
1223 sending hello command
1226 sending between command
1224 sending between command
1227 abort: no suitable response from remote hg
1225 abort: no suitable response from remote hg
1228 [255]
1226 [255]
1229 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1227 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1230 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1228 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1231 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1229 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1232 sending hello command
1230 sending hello command
1233 sending between command
1231 sending between command
1234 abort: no suitable response from remote hg
1232 abort: no suitable response from remote hg
1235 [255]
1233 [255]
1236 #endif
1234 #endif
1237
1235
1238 $ hg clone "ssh://v-alid.example.com/" --debug
1236 $ hg clone "ssh://v-alid.example.com/" --debug
1239 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1237 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1240 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1238 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1241 sending hello command
1239 sending hello command
1242 sending between command
1240 sending between command
1243 abort: no suitable response from remote hg
1241 abort: no suitable response from remote hg
1244 [255]
1242 [255]
1245
1243
1246 We should not have created a file named owned - if it exists, the
1244 We should not have created a file named owned - if it exists, the
1247 attack succeeded.
1245 attack succeeded.
1248 $ if test -f owned; then echo 'you got owned'; fi
1246 $ if test -f owned; then echo 'you got owned'; fi
1249
1247
1250 Cloning without fsmonitor enabled does not print a warning for small repos
1248 Cloning without fsmonitor enabled does not print a warning for small repos
1251
1249
1252 $ hg clone a fsmonitor-default
1250 $ hg clone a fsmonitor-default
1253 updating to bookmark @ on branch stable
1251 updating to bookmark @ on branch stable
1254 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1252 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1255
1253
1256 Lower the warning threshold to simulate a large repo
1254 Lower the warning threshold to simulate a large repo
1257
1255
1258 $ cat >> $HGRCPATH << EOF
1256 $ cat >> $HGRCPATH << EOF
1259 > [fsmonitor]
1257 > [fsmonitor]
1260 > warn_update_file_count = 2
1258 > warn_update_file_count = 2
1261 > warn_update_file_count_rust = 2
1259 > warn_update_file_count_rust = 2
1262 > EOF
1260 > EOF
1263
1261
1264 We should see a warning about no fsmonitor on supported platforms
1262 We should see a warning about no fsmonitor on supported platforms
1265
1263
1266 #if linuxormacos no-fsmonitor
1264 #if linuxormacos no-fsmonitor
1267 $ hg clone a nofsmonitor
1265 $ hg clone a nofsmonitor
1268 updating to bookmark @ on branch stable
1266 updating to bookmark @ on branch stable
1269 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1267 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1270 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1268 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1271 #else
1269 #else
1272 $ hg clone a nofsmonitor
1270 $ hg clone a nofsmonitor
1273 updating to bookmark @ on branch stable
1271 updating to bookmark @ on branch stable
1274 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1272 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1275 #endif
1273 #endif
1276
1274
1277 We should not see warning about fsmonitor when it is enabled
1275 We should not see warning about fsmonitor when it is enabled
1278
1276
1279 #if fsmonitor
1277 #if fsmonitor
1280 $ hg clone a fsmonitor-enabled
1278 $ hg clone a fsmonitor-enabled
1281 updating to bookmark @ on branch stable
1279 updating to bookmark @ on branch stable
1282 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1280 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1283 #endif
1281 #endif
1284
1282
1285 We can disable the fsmonitor warning
1283 We can disable the fsmonitor warning
1286
1284
1287 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1285 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1288 updating to bookmark @ on branch stable
1286 updating to bookmark @ on branch stable
1289 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1287 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1290
1288
1291 Loaded fsmonitor but disabled in config should still print warning
1289 Loaded fsmonitor but disabled in config should still print warning
1292
1290
1293 #if linuxormacos fsmonitor
1291 #if linuxormacos fsmonitor
1294 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1292 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1295 updating to bookmark @ on branch stable
1293 updating to bookmark @ on branch stable
1296 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1294 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1297 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1295 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1298 #endif
1296 #endif
1299
1297
1300 Warning not printed if working directory isn't empty
1298 Warning not printed if working directory isn't empty
1301
1299
1302 $ hg -q clone a fsmonitor-update
1300 $ hg -q clone a fsmonitor-update
1303 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1301 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1304 $ cd fsmonitor-update
1302 $ cd fsmonitor-update
1305 $ hg up acb14030fe0a
1303 $ hg up acb14030fe0a
1306 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1304 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1307 (leaving bookmark @)
1305 (leaving bookmark @)
1308 $ hg up cf0fe1914066
1306 $ hg up cf0fe1914066
1309 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1307 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1310
1308
1311 `hg update` from null revision also prints
1309 `hg update` from null revision also prints
1312
1310
1313 $ hg up null
1311 $ hg up null
1314 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1312 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1315
1313
1316 #if linuxormacos no-fsmonitor
1314 #if linuxormacos no-fsmonitor
1317 $ hg up cf0fe1914066
1315 $ hg up cf0fe1914066
1318 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1316 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1319 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1317 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1320 #else
1318 #else
1321 $ hg up cf0fe1914066
1319 $ hg up cf0fe1914066
1322 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1320 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1323 #endif
1321 #endif
1324
1322
1325 $ cd ..
1323 $ cd ..
1326
1324
@@ -1,447 +1,445 b''
1 #require hardlink reporevlogstore
1 #require hardlink reporevlogstore
2
2
3 $ cat > nlinks.py <<EOF
3 $ cat > nlinks.py <<EOF
4 > from __future__ import print_function
4 > from __future__ import print_function
5 > import sys
5 > import sys
6 > from mercurial import pycompat, util
6 > from mercurial import pycompat, util
7 > for f in sorted(sys.stdin.readlines()):
7 > for f in sorted(sys.stdin.readlines()):
8 > f = f[:-1]
8 > f = f[:-1]
9 > print(util.nlinks(pycompat.fsencode(f)), f)
9 > print(util.nlinks(pycompat.fsencode(f)), f)
10 > EOF
10 > EOF
11
11
12 $ nlinksdir()
12 $ nlinksdir()
13 > {
13 > {
14 > find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py
14 > find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py
15 > }
15 > }
16
16
17 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
17 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
18
18
19 $ cat > linkcp.py <<EOF
19 $ cat > linkcp.py <<EOF
20 > from __future__ import absolute_import
20 > from __future__ import absolute_import
21 > import sys
21 > import sys
22 > from mercurial import pycompat, util
22 > from mercurial import pycompat, util
23 > util.copyfiles(pycompat.fsencode(sys.argv[1]),
23 > util.copyfiles(pycompat.fsencode(sys.argv[1]),
24 > pycompat.fsencode(sys.argv[2]), hardlink=True)
24 > pycompat.fsencode(sys.argv[2]), hardlink=True)
25 > EOF
25 > EOF
26
26
27 $ linkcp()
27 $ linkcp()
28 > {
28 > {
29 > "$PYTHON" $TESTTMP/linkcp.py $1 $2
29 > "$PYTHON" $TESTTMP/linkcp.py $1 $2
30 > }
30 > }
31
31
32 Prepare repo r1:
32 Prepare repo r1:
33
33
34 $ hg init r1
34 $ hg init r1
35 $ cd r1
35 $ cd r1
36
36
37 $ echo c1 > f1
37 $ echo c1 > f1
38 $ hg add f1
38 $ hg add f1
39 $ hg ci -m0
39 $ hg ci -m0
40
40
41 $ mkdir d1
41 $ mkdir d1
42 $ cd d1
42 $ cd d1
43 $ echo c2 > f2
43 $ echo c2 > f2
44 $ hg add f2
44 $ hg add f2
45 $ hg ci -m1
45 $ hg ci -m1
46 $ cd ../..
46 $ cd ../..
47
47
48 $ nlinksdir r1/.hg/store
48 $ nlinksdir r1/.hg/store
49 1 r1/.hg/store/00changelog.i
49 1 r1/.hg/store/00changelog.i
50 1 r1/.hg/store/00manifest.i
50 1 r1/.hg/store/00manifest.i
51 1 r1/.hg/store/data/d1/f2.i
51 1 r1/.hg/store/data/d1/f2.i
52 1 r1/.hg/store/data/f1.i
52 1 r1/.hg/store/data/f1.i
53 1 r1/.hg/store/fncache (repofncache !)
53 1 r1/.hg/store/fncache (repofncache !)
54 1 r1/.hg/store/phaseroots
54 1 r1/.hg/store/phaseroots
55 1 r1/.hg/store/undo
55 1 r1/.hg/store/undo
56 1 r1/.hg/store/undo.backup.fncache (repofncache !)
56 1 r1/.hg/store/undo.backup.fncache (repofncache !)
57 1 r1/.hg/store/undo.backupfiles
57 1 r1/.hg/store/undo.backupfiles
58 1 r1/.hg/store/undo.phaseroots
58 1 r1/.hg/store/undo.phaseroots
59
59
60
60
61 Create hardlinked clone r2:
61 Create hardlinked clone r2:
62
62
63 $ hg clone -U --debug r1 r2 --config progress.debug=true
63 $ hg clone -U --debug r1 r2 --config progress.debug=true
64 linking: 1 files
64 linking: 1 files
65 linking: 2 files
65 linking: 2 files
66 linking: 3 files
66 linking: 3 files
67 linking: 4 files
67 linking: 4 files
68 linking: 5 files
68 linking: 5 files
69 linking: 6 files
69 linking: 6 files
70 linking: 7 files
70 linking: 7 files
71 linked 7 files
71 linked 7 files
72 updating the branch cache
72 updating the branch cache
73
73
74 Create non-hardlinked clone r3:
74 Create non-hardlinked clone r3:
75
75
76 $ hg clone --pull r1 r3
76 $ hg clone --pull r1 r3
77 requesting all changes
77 requesting all changes
78 adding changesets
78 adding changesets
79 adding manifests
79 adding manifests
80 adding file changes
80 adding file changes
81 added 2 changesets with 2 changes to 2 files
81 added 2 changesets with 2 changes to 2 files
82 new changesets 40d85e9847f2:7069c422939c
82 new changesets 40d85e9847f2:7069c422939c
83 updating to branch default
83 updating to branch default
84 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
84 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
85
85
86
86
87 Repos r1 and r2 should now contain hardlinked files:
87 Repos r1 and r2 should now contain hardlinked files:
88
88
89 $ nlinksdir r1/.hg/store
89 $ nlinksdir r1/.hg/store
90 2 r1/.hg/store/00changelog.i
90 2 r1/.hg/store/00changelog.i
91 2 r1/.hg/store/00manifest.i
91 2 r1/.hg/store/00manifest.i
92 2 r1/.hg/store/data/d1/f2.i
92 2 r1/.hg/store/data/d1/f2.i
93 2 r1/.hg/store/data/f1.i
93 2 r1/.hg/store/data/f1.i
94 2 r1/.hg/store/fncache (repofncache !)
94 2 r1/.hg/store/fncache (repofncache !)
95 1 r1/.hg/store/phaseroots
95 1 r1/.hg/store/phaseroots
96 1 r1/.hg/store/undo
96 1 r1/.hg/store/undo
97 1 r1/.hg/store/undo.backup.fncache (repofncache !)
97 1 r1/.hg/store/undo.backup.fncache (repofncache !)
98 1 r1/.hg/store/undo.backupfiles
98 1 r1/.hg/store/undo.backupfiles
99 1 r1/.hg/store/undo.phaseroots
99 1 r1/.hg/store/undo.phaseroots
100
100
101 $ nlinksdir r2/.hg/store
101 $ nlinksdir r2/.hg/store
102 2 r2/.hg/store/00changelog.i
102 2 r2/.hg/store/00changelog.i
103 2 r2/.hg/store/00manifest.i
103 2 r2/.hg/store/00manifest.i
104 2 r2/.hg/store/data/d1/f2.i
104 2 r2/.hg/store/data/d1/f2.i
105 2 r2/.hg/store/data/f1.i
105 2 r2/.hg/store/data/f1.i
106 2 r2/.hg/store/fncache (repofncache !)
106 2 r2/.hg/store/fncache (repofncache !)
107
107
108 Repo r3 should not be hardlinked:
108 Repo r3 should not be hardlinked:
109
109
110 $ nlinksdir r3/.hg/store
110 $ nlinksdir r3/.hg/store
111 1 r3/.hg/store/00changelog.i
111 1 r3/.hg/store/00changelog.i
112 1 r3/.hg/store/00manifest.i
112 1 r3/.hg/store/00manifest.i
113 1 r3/.hg/store/data/d1/f2.i
113 1 r3/.hg/store/data/d1/f2.i
114 1 r3/.hg/store/data/f1.i
114 1 r3/.hg/store/data/f1.i
115 1 r3/.hg/store/fncache (repofncache !)
115 1 r3/.hg/store/fncache (repofncache !)
116 1 r3/.hg/store/phaseroots
116 1 r3/.hg/store/phaseroots
117 1 r3/.hg/store/undo
117 1 r3/.hg/store/undo
118 1 r3/.hg/store/undo.backupfiles
118 1 r3/.hg/store/undo.backupfiles
119 1 r3/.hg/store/undo.phaseroots
119 1 r3/.hg/store/undo.phaseroots
120
120
121
121
122 Create a non-inlined filelog in r3:
122 Create a non-inlined filelog in r3:
123
123
124 $ cd r3/d1
124 $ cd r3/d1
125 >>> f = open('data1', 'wb')
125 >>> f = open('data1', 'wb')
126 >>> for x in range(10000):
126 >>> for x in range(10000):
127 ... f.write(b"%d\n" % x) and None
127 ... f.write(b"%d\n" % x) and None
128 >>> f.close()
128 >>> f.close()
129 $ for j in 0 1 2 3 4 5 6 7 8 9; do
129 $ for j in 0 1 2 3 4 5 6 7 8 9; do
130 > cat data1 >> f2
130 > cat data1 >> f2
131 > hg commit -m$j
131 > hg commit -m$j
132 > done
132 > done
133 $ cd ../..
133 $ cd ../..
134
134
135 $ nlinksdir r3/.hg/store
135 $ nlinksdir r3/.hg/store
136 1 r3/.hg/store/00changelog.i
136 1 r3/.hg/store/00changelog.i
137 1 r3/.hg/store/00manifest.i
137 1 r3/.hg/store/00manifest.i
138 1 r3/.hg/store/data/d1/f2.d
138 1 r3/.hg/store/data/d1/f2.d
139 1 r3/.hg/store/data/d1/f2.i
139 1 r3/.hg/store/data/d1/f2.i
140 1 r3/.hg/store/data/f1.i
140 1 r3/.hg/store/data/f1.i
141 1 r3/.hg/store/fncache (repofncache !)
141 1 r3/.hg/store/fncache (repofncache !)
142 1 r3/.hg/store/phaseroots
142 1 r3/.hg/store/phaseroots
143 1 r3/.hg/store/undo
143 1 r3/.hg/store/undo
144 1 r3/.hg/store/undo.backup.fncache (repofncache !)
144 1 r3/.hg/store/undo.backup.fncache (repofncache !)
145 1 r3/.hg/store/undo.backup.phaseroots
145 1 r3/.hg/store/undo.backup.phaseroots
146 1 r3/.hg/store/undo.backupfiles
146 1 r3/.hg/store/undo.backupfiles
147 1 r3/.hg/store/undo.phaseroots
147 1 r3/.hg/store/undo.phaseroots
148
148
149 Push to repo r1 should break up most hardlinks in r2:
149 Push to repo r1 should break up most hardlinks in r2:
150
150
151 $ hg -R r2 verify
151 $ hg -R r2 verify
152 checking changesets
152 checking changesets
153 checking manifests
153 checking manifests
154 crosschecking files in changesets and manifests
154 crosschecking files in changesets and manifests
155 checking files
155 checking files
156 checked 2 changesets with 2 changes to 2 files
156 checked 2 changesets with 2 changes to 2 files
157
157
158 $ cd r3
158 $ cd r3
159 $ hg push
159 $ hg push
160 pushing to $TESTTMP/r1
160 pushing to $TESTTMP/r1
161 searching for changes
161 searching for changes
162 adding changesets
162 adding changesets
163 adding manifests
163 adding manifests
164 adding file changes
164 adding file changes
165 added 10 changesets with 10 changes to 1 files
165 added 10 changesets with 10 changes to 1 files
166
166
167 $ cd ..
167 $ cd ..
168
168
169 $ nlinksdir r2/.hg/store
169 $ nlinksdir r2/.hg/store
170 1 r2/.hg/store/00changelog.i
170 1 r2/.hg/store/00changelog.i
171 1 r2/.hg/store/00manifest.i
171 1 r2/.hg/store/00manifest.i
172 1 r2/.hg/store/data/d1/f2.i
172 1 r2/.hg/store/data/d1/f2.i
173 2 r2/.hg/store/data/f1.i
173 2 r2/.hg/store/data/f1.i
174 [12] r2/\.hg/store/fncache (re) (repofncache !)
174 [12] r2/\.hg/store/fncache (re) (repofncache !)
175
175
176 #if hardlink-whitelisted repofncache
176 #if hardlink-whitelisted repofncache
177 $ nlinksdir r2/.hg/store/fncache
177 $ nlinksdir r2/.hg/store/fncache
178 2 r2/.hg/store/fncache
178 2 r2/.hg/store/fncache
179 #endif
179 #endif
180
180
181 $ hg -R r2 verify
181 $ hg -R r2 verify
182 checking changesets
182 checking changesets
183 checking manifests
183 checking manifests
184 crosschecking files in changesets and manifests
184 crosschecking files in changesets and manifests
185 checking files
185 checking files
186 checked 2 changesets with 2 changes to 2 files
186 checked 2 changesets with 2 changes to 2 files
187
187
188
188
189 $ cd r1
189 $ cd r1
190 $ hg up
190 $ hg up
191 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
191 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
192
192
193 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
193 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
194
194
195 $ echo c1c1 >> f1
195 $ echo c1c1 >> f1
196 $ hg ci -m00
196 $ hg ci -m00
197 $ cd ..
197 $ cd ..
198
198
199 $ nlinksdir r2/.hg/store
199 $ nlinksdir r2/.hg/store
200 1 r2/.hg/store/00changelog.i
200 1 r2/.hg/store/00changelog.i
201 1 r2/.hg/store/00manifest.i
201 1 r2/.hg/store/00manifest.i
202 1 r2/.hg/store/data/d1/f2.i
202 1 r2/.hg/store/data/d1/f2.i
203 1 r2/.hg/store/data/f1.i
203 1 r2/.hg/store/data/f1.i
204 [12] r2/\.hg/store/fncache (re) (repofncache !)
204 [12] r2/\.hg/store/fncache (re) (repofncache !)
205
205
206 #if hardlink-whitelisted repofncache
206 #if hardlink-whitelisted repofncache
207 $ nlinksdir r2/.hg/store/fncache
207 $ nlinksdir r2/.hg/store/fncache
208 2 r2/.hg/store/fncache
208 2 r2/.hg/store/fncache
209 #endif
209 #endif
210
210
211 Create a file which exec permissions we will change
211 Create a file which exec permissions we will change
212 $ cd r3
212 $ cd r3
213 $ echo "echo hello world" > f3
213 $ echo "echo hello world" > f3
214 $ hg add f3
214 $ hg add f3
215 $ hg ci -mf3
215 $ hg ci -mf3
216 $ cd ..
216 $ cd ..
217
217
218 $ cd r3
218 $ cd r3
219 $ hg tip --template '{rev}:{node|short}\n'
219 $ hg tip --template '{rev}:{node|short}\n'
220 12:d3b77733a28a
220 12:d3b77733a28a
221 $ echo bla > f1
221 $ echo bla > f1
222 $ chmod +x f3
222 $ chmod +x f3
223 $ hg ci -m1
223 $ hg ci -m1
224 $ cd ..
224 $ cd ..
225
225
226 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
226 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
227
227
228 $ linkcp r3 r4
228 $ linkcp r3 r4
229
229
230 'checklink' is produced by hardlinking a symlink, which is undefined whether
230 'checklink' is produced by hardlinking a symlink, which is undefined whether
231 the symlink should be followed or not. It does behave differently on Linux and
231 the symlink should be followed or not. It does behave differently on Linux and
232 BSD. Just remove it so the test pass on both platforms.
232 BSD. Just remove it so the test pass on both platforms.
233
233
234 $ rm -f r4/.hg/wcache/checklink
234 $ rm -f r4/.hg/wcache/checklink
235
235
236 r4 has hardlinks in the working dir (not just inside .hg):
236 r4 has hardlinks in the working dir (not just inside .hg):
237
237
238 $ nlinksdir r4
238 $ nlinksdir r4
239 2 r4/.hg/00changelog.i
239 2 r4/.hg/00changelog.i
240 2 r4/.hg/branch
240 2 r4/.hg/branch
241 2 r4/.hg/cache/branch2-base
241 2 r4/.hg/cache/branch2-base
242 2 r4/.hg/cache/branch2-immutable
242 2 r4/.hg/cache/branch2-immutable
243 2 r4/.hg/cache/branch2-served
243 2 r4/.hg/cache/branch2-served
244 2 r4/.hg/cache/branch2-served.hidden
244 2 r4/.hg/cache/branch2-served.hidden
245 2 r4/.hg/cache/branch2-visible
245 2 r4/.hg/cache/branch2-visible
246 2 r4/.hg/cache/branch2-visible-hidden
246 2 r4/.hg/cache/branch2-visible-hidden
247 2 r4/.hg/cache/hgtagsfnodes1
248 2 r4/.hg/cache/rbc-names-v1
247 2 r4/.hg/cache/rbc-names-v1
249 2 r4/.hg/cache/rbc-revs-v1
248 2 r4/.hg/cache/rbc-revs-v1
250 2 r4/.hg/cache/tags2
249 2 r4/.hg/cache/tags2
251 2 r4/.hg/cache/tags2-served
250 2 r4/.hg/cache/tags2-served
252 2 r4/.hg/dirstate
251 2 r4/.hg/dirstate
253 2 r4/.hg/fsmonitor.state (fsmonitor !)
252 2 r4/.hg/fsmonitor.state (fsmonitor !)
254 2 r4/.hg/hgrc
253 2 r4/.hg/hgrc
255 2 r4/.hg/last-message.txt
254 2 r4/.hg/last-message.txt
256 2 r4/.hg/requires
255 2 r4/.hg/requires
257 2 r4/.hg/store/00changelog.i
256 2 r4/.hg/store/00changelog.i
258 2 r4/.hg/store/00manifest.i
257 2 r4/.hg/store/00manifest.i
259 2 r4/.hg/store/data/d1/f2.d
258 2 r4/.hg/store/data/d1/f2.d
260 2 r4/.hg/store/data/d1/f2.i
259 2 r4/.hg/store/data/d1/f2.i
261 2 r4/.hg/store/data/f1.i
260 2 r4/.hg/store/data/f1.i
262 2 r4/.hg/store/data/f3.i
261 2 r4/.hg/store/data/f3.i
263 2 r4/.hg/store/fncache (repofncache !)
262 2 r4/.hg/store/fncache (repofncache !)
264 2 r4/.hg/store/phaseroots
263 2 r4/.hg/store/phaseroots
265 2 r4/.hg/store/undo
264 2 r4/.hg/store/undo
266 2 r4/.hg/store/undo.backup.fncache (repofncache !)
265 2 r4/.hg/store/undo.backup.fncache (repofncache !)
267 2 r4/.hg/store/undo.backup.phaseroots
266 2 r4/.hg/store/undo.backup.phaseroots
268 2 r4/.hg/store/undo.backupfiles
267 2 r4/.hg/store/undo.backupfiles
269 2 r4/.hg/store/undo.phaseroots
268 2 r4/.hg/store/undo.phaseroots
270 [24] r4/\.hg/undo\.backup\.dirstate (re)
269 [24] r4/\.hg/undo\.backup\.dirstate (re)
271 2 r4/.hg/undo.bookmarks
270 2 r4/.hg/undo.bookmarks
272 2 r4/.hg/undo.branch
271 2 r4/.hg/undo.branch
273 2 r4/.hg/undo.desc
272 2 r4/.hg/undo.desc
274 [24] r4/\.hg/undo\.dirstate (re)
273 [24] r4/\.hg/undo\.dirstate (re)
275 2 r4/.hg/wcache/checkisexec (execbit !)
274 2 r4/.hg/wcache/checkisexec (execbit !)
276 2 r4/.hg/wcache/checklink-target (symlink !)
275 2 r4/.hg/wcache/checklink-target (symlink !)
277 2 r4/.hg/wcache/checknoexec (execbit !)
276 2 r4/.hg/wcache/checknoexec (execbit !)
278 2 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
277 2 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
279 2 r4/d1/data1
278 2 r4/d1/data1
280 2 r4/d1/f2
279 2 r4/d1/f2
281 2 r4/f1
280 2 r4/f1
282 2 r4/f3
281 2 r4/f3
283
282
284 Update back to revision 12 in r4 should break hardlink of file f1 and f3:
283 Update back to revision 12 in r4 should break hardlink of file f1 and f3:
285 #if hardlink-whitelisted
284 #if hardlink-whitelisted
286 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
285 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
287 4 r4/.hg/undo.backup.dirstate
286 4 r4/.hg/undo.backup.dirstate
288 4 r4/.hg/undo.dirstate
287 4 r4/.hg/undo.dirstate
289 #endif
288 #endif
290
289
291
290
292 $ hg -R r4 up 12
291 $ hg -R r4 up 12
293 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (execbit !)
292 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (execbit !)
294 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-execbit !)
293 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-execbit !)
295
294
296 $ nlinksdir r4
295 $ nlinksdir r4
297 2 r4/.hg/00changelog.i
296 2 r4/.hg/00changelog.i
298 1 r4/.hg/branch
297 1 r4/.hg/branch
299 2 r4/.hg/cache/branch2-base
298 2 r4/.hg/cache/branch2-base
300 2 r4/.hg/cache/branch2-immutable
299 2 r4/.hg/cache/branch2-immutable
301 2 r4/.hg/cache/branch2-served
300 2 r4/.hg/cache/branch2-served
302 2 r4/.hg/cache/branch2-served.hidden
301 2 r4/.hg/cache/branch2-served.hidden
303 2 r4/.hg/cache/branch2-visible
302 2 r4/.hg/cache/branch2-visible
304 2 r4/.hg/cache/branch2-visible-hidden
303 2 r4/.hg/cache/branch2-visible-hidden
305 2 r4/.hg/cache/hgtagsfnodes1
306 2 r4/.hg/cache/rbc-names-v1
304 2 r4/.hg/cache/rbc-names-v1
307 2 r4/.hg/cache/rbc-revs-v1
305 2 r4/.hg/cache/rbc-revs-v1
308 2 r4/.hg/cache/tags2
306 2 r4/.hg/cache/tags2
309 2 r4/.hg/cache/tags2-served
307 2 r4/.hg/cache/tags2-served
310 1 r4/.hg/dirstate
308 1 r4/.hg/dirstate
311 1 r4/.hg/fsmonitor.state (fsmonitor !)
309 1 r4/.hg/fsmonitor.state (fsmonitor !)
312 2 r4/.hg/hgrc
310 2 r4/.hg/hgrc
313 2 r4/.hg/last-message.txt
311 2 r4/.hg/last-message.txt
314 2 r4/.hg/requires
312 2 r4/.hg/requires
315 2 r4/.hg/store/00changelog.i
313 2 r4/.hg/store/00changelog.i
316 2 r4/.hg/store/00manifest.i
314 2 r4/.hg/store/00manifest.i
317 2 r4/.hg/store/data/d1/f2.d
315 2 r4/.hg/store/data/d1/f2.d
318 2 r4/.hg/store/data/d1/f2.i
316 2 r4/.hg/store/data/d1/f2.i
319 2 r4/.hg/store/data/f1.i
317 2 r4/.hg/store/data/f1.i
320 2 r4/.hg/store/data/f3.i
318 2 r4/.hg/store/data/f3.i
321 2 r4/.hg/store/fncache
319 2 r4/.hg/store/fncache
322 2 r4/.hg/store/phaseroots
320 2 r4/.hg/store/phaseroots
323 2 r4/.hg/store/undo
321 2 r4/.hg/store/undo
324 2 r4/.hg/store/undo.backup.fncache (repofncache !)
322 2 r4/.hg/store/undo.backup.fncache (repofncache !)
325 2 r4/.hg/store/undo.backup.phaseroots
323 2 r4/.hg/store/undo.backup.phaseroots
326 2 r4/.hg/store/undo.backupfiles
324 2 r4/.hg/store/undo.backupfiles
327 2 r4/.hg/store/undo.phaseroots
325 2 r4/.hg/store/undo.phaseroots
328 [24] r4/\.hg/undo\.backup\.dirstate (re)
326 [24] r4/\.hg/undo\.backup\.dirstate (re)
329 2 r4/.hg/undo.bookmarks
327 2 r4/.hg/undo.bookmarks
330 2 r4/.hg/undo.branch
328 2 r4/.hg/undo.branch
331 2 r4/.hg/undo.desc
329 2 r4/.hg/undo.desc
332 [24] r4/\.hg/undo\.dirstate (re)
330 [24] r4/\.hg/undo\.dirstate (re)
333 2 r4/.hg/wcache/checkisexec (execbit !)
331 2 r4/.hg/wcache/checkisexec (execbit !)
334 2 r4/.hg/wcache/checklink-target (symlink !)
332 2 r4/.hg/wcache/checklink-target (symlink !)
335 2 r4/.hg/wcache/checknoexec (execbit !)
333 2 r4/.hg/wcache/checknoexec (execbit !)
336 1 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
334 1 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
337 2 r4/d1/data1
335 2 r4/d1/data1
338 2 r4/d1/f2
336 2 r4/d1/f2
339 1 r4/f1
337 1 r4/f1
340 1 r4/f3 (execbit !)
338 1 r4/f3 (execbit !)
341 2 r4/f3 (no-execbit !)
339 2 r4/f3 (no-execbit !)
342
340
343 #if hardlink-whitelisted
341 #if hardlink-whitelisted
344 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
342 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
345 4 r4/.hg/undo.backup.dirstate
343 4 r4/.hg/undo.backup.dirstate
346 4 r4/.hg/undo.dirstate
344 4 r4/.hg/undo.dirstate
347 #endif
345 #endif
348
346
349 Test hardlinking outside hg:
347 Test hardlinking outside hg:
350
348
351 $ mkdir x
349 $ mkdir x
352 $ echo foo > x/a
350 $ echo foo > x/a
353
351
354 $ linkcp x y
352 $ linkcp x y
355 $ echo bar >> y/a
353 $ echo bar >> y/a
356
354
357 No diff if hardlink:
355 No diff if hardlink:
358
356
359 $ diff x/a y/a
357 $ diff x/a y/a
360
358
361 Test mq hardlinking:
359 Test mq hardlinking:
362
360
363 $ echo "[extensions]" >> $HGRCPATH
361 $ echo "[extensions]" >> $HGRCPATH
364 $ echo "mq=" >> $HGRCPATH
362 $ echo "mq=" >> $HGRCPATH
365
363
366 $ hg init a
364 $ hg init a
367 $ cd a
365 $ cd a
368
366
369 $ hg qimport -n foo - << EOF
367 $ hg qimport -n foo - << EOF
370 > # HG changeset patch
368 > # HG changeset patch
371 > # Date 1 0
369 > # Date 1 0
372 > diff -r 2588a8b53d66 a
370 > diff -r 2588a8b53d66 a
373 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
371 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
374 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
372 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
375 > @@ -0,0 +1,1 @@
373 > @@ -0,0 +1,1 @@
376 > +a
374 > +a
377 > EOF
375 > EOF
378 adding foo to series file
376 adding foo to series file
379
377
380 $ hg qpush
378 $ hg qpush
381 applying foo
379 applying foo
382 now at: foo
380 now at: foo
383
381
384 $ cd ..
382 $ cd ..
385 $ linkcp a b
383 $ linkcp a b
386 $ cd b
384 $ cd b
387
385
388 $ hg qimport -n bar - << EOF
386 $ hg qimport -n bar - << EOF
389 > # HG changeset patch
387 > # HG changeset patch
390 > # Date 2 0
388 > # Date 2 0
391 > diff -r 2588a8b53d66 a
389 > diff -r 2588a8b53d66 a
392 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
390 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
393 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
391 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
394 > @@ -0,0 +1,1 @@
392 > @@ -0,0 +1,1 @@
395 > +b
393 > +b
396 > EOF
394 > EOF
397 adding bar to series file
395 adding bar to series file
398
396
399 $ hg qpush
397 $ hg qpush
400 applying bar
398 applying bar
401 now at: bar
399 now at: bar
402
400
403 $ cat .hg/patches/status
401 $ cat .hg/patches/status
404 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
402 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
405 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
403 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
406
404
407 $ cat .hg/patches/series
405 $ cat .hg/patches/series
408 foo
406 foo
409 bar
407 bar
410
408
411 $ cat ../a/.hg/patches/status
409 $ cat ../a/.hg/patches/status
412 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
410 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
413
411
414 $ cat ../a/.hg/patches/series
412 $ cat ../a/.hg/patches/series
415 foo
413 foo
416
414
417 Test tags hardlinking:
415 Test tags hardlinking:
418
416
419 $ hg qdel -r qbase:qtip
417 $ hg qdel -r qbase:qtip
420 patch foo finalized without changeset message
418 patch foo finalized without changeset message
421 patch bar finalized without changeset message
419 patch bar finalized without changeset message
422
420
423 $ hg tag -l lfoo
421 $ hg tag -l lfoo
424 $ hg tag foo
422 $ hg tag foo
425
423
426 $ cd ..
424 $ cd ..
427 $ linkcp b c
425 $ linkcp b c
428 $ cd c
426 $ cd c
429
427
430 $ hg tag -l -r 0 lbar
428 $ hg tag -l -r 0 lbar
431 $ hg tag -r 0 bar
429 $ hg tag -r 0 bar
432
430
433 $ cat .hgtags
431 $ cat .hgtags
434 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
432 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
435 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
433 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
436
434
437 $ cat .hg/localtags
435 $ cat .hg/localtags
438 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
436 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
439 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
437 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
440
438
441 $ cat ../b/.hgtags
439 $ cat ../b/.hgtags
442 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
440 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
443
441
444 $ cat ../b/.hg/localtags
442 $ cat ../b/.hg/localtags
445 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
443 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
446
444
447 $ cd ..
445 $ cd ..
@@ -1,287 +1,286 b''
1 #testcases safe normal
1 #testcases safe normal
2
2
3 #if safe
3 #if safe
4 $ echo "[format]" >> $HGRCPATH
4 $ echo "[format]" >> $HGRCPATH
5 $ echo "use-share-safe = True" >> $HGRCPATH
5 $ echo "use-share-safe = True" >> $HGRCPATH
6 #endif
6 #endif
7
7
8 $ echo "[extensions]" >> $HGRCPATH
8 $ echo "[extensions]" >> $HGRCPATH
9 $ echo "share = " >> $HGRCPATH
9 $ echo "share = " >> $HGRCPATH
10
10
11 prepare repo1
11 prepare repo1
12
12
13 $ hg init repo1
13 $ hg init repo1
14 $ cd repo1
14 $ cd repo1
15 $ echo a > a
15 $ echo a > a
16 $ hg commit -A -m'init'
16 $ hg commit -A -m'init'
17 adding a
17 adding a
18
18
19 share it
19 share it
20
20
21 $ cd ..
21 $ cd ..
22 $ hg share repo1 repo2
22 $ hg share repo1 repo2
23 updating working directory
23 updating working directory
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
25
25
26 share shouldn't have a store dir
26 share shouldn't have a store dir
27
27
28 $ cd repo2
28 $ cd repo2
29 $ test -d .hg/store
29 $ test -d .hg/store
30 [1]
30 [1]
31 $ hg root -Tjson | sed 's|\\\\|\\|g'
31 $ hg root -Tjson | sed 's|\\\\|\\|g'
32 [
32 [
33 {
33 {
34 "hgpath": "$TESTTMP/repo2/.hg",
34 "hgpath": "$TESTTMP/repo2/.hg",
35 "reporoot": "$TESTTMP/repo2",
35 "reporoot": "$TESTTMP/repo2",
36 "storepath": "$TESTTMP/repo1/.hg/store"
36 "storepath": "$TESTTMP/repo1/.hg/store"
37 }
37 }
38 ]
38 ]
39
39
40 share shouldn't have a full cache dir, original repo should
40 share shouldn't have a full cache dir, original repo should
41
41
42 $ hg branches
42 $ hg branches
43 default 0:d3873e73d99e
43 default 0:d3873e73d99e
44 $ hg tags
44 $ hg tags
45 tip 0:d3873e73d99e
45 tip 0:d3873e73d99e
46 $ test -d .hg/cache
46 $ test -d .hg/cache
47 [1]
47 [1]
48 $ ls -1 .hg/wcache || true
48 $ ls -1 .hg/wcache || true
49 checkisexec (execbit !)
49 checkisexec (execbit !)
50 checklink (symlink !)
50 checklink (symlink !)
51 checklink-target (symlink !)
51 checklink-target (symlink !)
52 manifestfulltextcache (reporevlogstore !)
52 manifestfulltextcache (reporevlogstore !)
53 $ ls -1 ../repo1/.hg/cache
53 $ ls -1 ../repo1/.hg/cache
54 branch2-served
54 branch2-served
55 rbc-names-v1
55 rbc-names-v1
56 rbc-revs-v1
56 rbc-revs-v1
57 tags2-visible
57 tags2-visible
58
58
59 Cloning a shared repo should pick up the full cache dir on the other hand.
59 Cloning a shared repo should pick up the full cache dir on the other hand.
60
60
61 $ hg clone . ../repo2-clone
61 $ hg clone . ../repo2-clone
62 updating to branch default
62 updating to branch default
63 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
63 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
64 $ ls -1 ../repo2-clone/.hg/cache
64 $ ls -1 ../repo2-clone/.hg/cache
65 branch2-base
65 branch2-base
66 branch2-immutable
66 branch2-immutable
67 branch2-served
67 branch2-served
68 branch2-served.hidden
68 branch2-served.hidden
69 branch2-visible
69 branch2-visible
70 branch2-visible-hidden
70 branch2-visible-hidden
71 hgtagsfnodes1
72 rbc-names-v1
71 rbc-names-v1
73 rbc-revs-v1
72 rbc-revs-v1
74 tags2
73 tags2
75 tags2-served
74 tags2-served
76 tags2-visible
75 tags2-visible
77
76
78 Some sed versions appends newline, some don't, and some just fails
77 Some sed versions appends newline, some don't, and some just fails
79
78
80 $ cat .hg/sharedpath; echo
79 $ cat .hg/sharedpath; echo
81 $TESTTMP/repo1/.hg
80 $TESTTMP/repo1/.hg
82
81
83 trailing newline on .hg/sharedpath is ok
82 trailing newline on .hg/sharedpath is ok
84 $ hg tip -q
83 $ hg tip -q
85 0:d3873e73d99e
84 0:d3873e73d99e
86 $ echo '' >> .hg/sharedpath
85 $ echo '' >> .hg/sharedpath
87 $ cat .hg/sharedpath
86 $ cat .hg/sharedpath
88 $TESTTMP/repo1/.hg
87 $TESTTMP/repo1/.hg
89 $ hg tip -q
88 $ hg tip -q
90 0:d3873e73d99e
89 0:d3873e73d99e
91
90
92 commit in shared clone
91 commit in shared clone
93
92
94 $ echo a >> a
93 $ echo a >> a
95 $ hg commit -m'change in shared clone'
94 $ hg commit -m'change in shared clone'
96
95
97 check original
96 check original
98
97
99 $ cd ../repo1
98 $ cd ../repo1
100 $ hg log
99 $ hg log
101 changeset: 1:8af4dc49db9e
100 changeset: 1:8af4dc49db9e
102 tag: tip
101 tag: tip
103 user: test
102 user: test
104 date: Thu Jan 01 00:00:00 1970 +0000
103 date: Thu Jan 01 00:00:00 1970 +0000
105 summary: change in shared clone
104 summary: change in shared clone
106
105
107 changeset: 0:d3873e73d99e
106 changeset: 0:d3873e73d99e
108 user: test
107 user: test
109 date: Thu Jan 01 00:00:00 1970 +0000
108 date: Thu Jan 01 00:00:00 1970 +0000
110 summary: init
109 summary: init
111
110
112 $ hg update
111 $ hg update
113 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
112 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
114 $ cat a # should be two lines of "a"
113 $ cat a # should be two lines of "a"
115 a
114 a
116 a
115 a
117
116
118 commit in original
117 commit in original
119
118
120 $ echo b > b
119 $ echo b > b
121 $ hg commit -A -m'another file'
120 $ hg commit -A -m'another file'
122 adding b
121 adding b
123
122
124 check in shared clone
123 check in shared clone
125
124
126 $ cd ../repo2
125 $ cd ../repo2
127 $ hg log
126 $ hg log
128 changeset: 2:c2e0ac586386
127 changeset: 2:c2e0ac586386
129 tag: tip
128 tag: tip
130 user: test
129 user: test
131 date: Thu Jan 01 00:00:00 1970 +0000
130 date: Thu Jan 01 00:00:00 1970 +0000
132 summary: another file
131 summary: another file
133
132
134 changeset: 1:8af4dc49db9e
133 changeset: 1:8af4dc49db9e
135 user: test
134 user: test
136 date: Thu Jan 01 00:00:00 1970 +0000
135 date: Thu Jan 01 00:00:00 1970 +0000
137 summary: change in shared clone
136 summary: change in shared clone
138
137
139 changeset: 0:d3873e73d99e
138 changeset: 0:d3873e73d99e
140 user: test
139 user: test
141 date: Thu Jan 01 00:00:00 1970 +0000
140 date: Thu Jan 01 00:00:00 1970 +0000
142 summary: init
141 summary: init
143
142
144 $ hg update
143 $ hg update
145 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
144 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
146 $ cat b # should exist with one "b"
145 $ cat b # should exist with one "b"
147 b
146 b
148
147
149 hg serve shared clone
148 hg serve shared clone
150
149
151 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid
150 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid
152 $ cat hg.pid >> $DAEMON_PIDS
151 $ cat hg.pid >> $DAEMON_PIDS
153 $ get-with-headers.py localhost:$HGPORT 'raw-file/'
152 $ get-with-headers.py localhost:$HGPORT 'raw-file/'
154 200 Script output follows
153 200 Script output follows
155
154
156
155
157 -rw-r--r-- 4 a
156 -rw-r--r-- 4 a
158 -rw-r--r-- 2 b
157 -rw-r--r-- 2 b
159
158
160
159
161 Cloning a shared repo via bundle2 results in a non-shared clone
160 Cloning a shared repo via bundle2 results in a non-shared clone
162
161
163 $ cd ..
162 $ cd ..
164 $ hg clone -q --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
163 $ hg clone -q --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
165 $ cat ./cloned-via-bundle2/.hg/requires | grep "shared"
164 $ cat ./cloned-via-bundle2/.hg/requires | grep "shared"
166 [1]
165 [1]
167 $ hg id --cwd cloned-via-bundle2 -r tip
166 $ hg id --cwd cloned-via-bundle2 -r tip
168 c2e0ac586386 tip
167 c2e0ac586386 tip
169 $ cd repo2
168 $ cd repo2
170
169
171 test unshare command
170 test unshare command
172
171
173 $ hg unshare
172 $ hg unshare
174 $ test -d .hg/store
173 $ test -d .hg/store
175 $ test -f .hg/sharedpath
174 $ test -f .hg/sharedpath
176 [1]
175 [1]
177 $ grep shared .hg/requires
176 $ grep shared .hg/requires
178 [1]
177 [1]
179 $ hg unshare
178 $ hg unshare
180 abort: this is not a shared repo
179 abort: this is not a shared repo
181 [255]
180 [255]
182
181
183 check that a change does not propagate
182 check that a change does not propagate
184
183
185 $ echo b >> b
184 $ echo b >> b
186 $ hg commit -m'change in unshared'
185 $ hg commit -m'change in unshared'
187 $ cd ../repo1
186 $ cd ../repo1
188 $ hg id -r tip
187 $ hg id -r tip
189 c2e0ac586386 tip
188 c2e0ac586386 tip
190
189
191 $ cd ..
190 $ cd ..
192
191
193
192
194 non largefiles repos won't enable largefiles
193 non largefiles repos won't enable largefiles
195
194
196 $ hg share --config extensions.largefiles= repo2 sharedrepo
195 $ hg share --config extensions.largefiles= repo2 sharedrepo
197 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
196 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
198 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
197 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
199 updating working directory
198 updating working directory
200 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
199 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
201 $ [ -f sharedrepo/.hg/hgrc ]
200 $ [ -f sharedrepo/.hg/hgrc ]
202 [1]
201 [1]
203
202
204 test shared clones using relative paths work
203 test shared clones using relative paths work
205
204
206 $ mkdir thisdir
205 $ mkdir thisdir
207 $ hg init thisdir/orig
206 $ hg init thisdir/orig
208 $ hg share -U thisdir/orig thisdir/abs
207 $ hg share -U thisdir/orig thisdir/abs
209 $ hg share -U --relative thisdir/abs thisdir/rel
208 $ hg share -U --relative thisdir/abs thisdir/rel
210 $ cat thisdir/rel/.hg/sharedpath
209 $ cat thisdir/rel/.hg/sharedpath
211 ../../orig/.hg (no-eol)
210 ../../orig/.hg (no-eol)
212 $ grep shared thisdir/*/.hg/requires
211 $ grep shared thisdir/*/.hg/requires
213 thisdir/abs/.hg/requires:shared
212 thisdir/abs/.hg/requires:shared
214 thisdir/rel/.hg/requires:relshared
213 thisdir/rel/.hg/requires:relshared
215 thisdir/rel/.hg/requires:shared
214 thisdir/rel/.hg/requires:shared
216
215
217 test that relative shared paths aren't relative to $PWD
216 test that relative shared paths aren't relative to $PWD
218
217
219 $ cd thisdir
218 $ cd thisdir
220 $ hg -R rel root
219 $ hg -R rel root
221 $TESTTMP/thisdir/rel
220 $TESTTMP/thisdir/rel
222 $ cd ..
221 $ cd ..
223
222
224 now test that relative paths really are relative, survive across
223 now test that relative paths really are relative, survive across
225 renames and changes of PWD
224 renames and changes of PWD
226
225
227 $ hg -R thisdir/abs root
226 $ hg -R thisdir/abs root
228 $TESTTMP/thisdir/abs
227 $TESTTMP/thisdir/abs
229 $ hg -R thisdir/rel root
228 $ hg -R thisdir/rel root
230 $TESTTMP/thisdir/rel
229 $TESTTMP/thisdir/rel
231 $ mv thisdir thatdir
230 $ mv thisdir thatdir
232 $ hg -R thatdir/abs root
231 $ hg -R thatdir/abs root
233 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/thisdir/orig/.hg
232 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/thisdir/orig/.hg
234 [255]
233 [255]
235 $ hg -R thatdir/rel root
234 $ hg -R thatdir/rel root
236 $TESTTMP/thatdir/rel
235 $TESTTMP/thatdir/rel
237
236
238 test unshare relshared repo
237 test unshare relshared repo
239
238
240 $ cd thatdir/rel
239 $ cd thatdir/rel
241 $ hg unshare
240 $ hg unshare
242 $ test -d .hg/store
241 $ test -d .hg/store
243 $ test -f .hg/sharedpath
242 $ test -f .hg/sharedpath
244 [1]
243 [1]
245 $ grep shared .hg/requires
244 $ grep shared .hg/requires
246 [1]
245 [1]
247 $ hg unshare
246 $ hg unshare
248 abort: this is not a shared repo
247 abort: this is not a shared repo
249 [255]
248 [255]
250 $ cd ../..
249 $ cd ../..
251
250
252 $ rm -r thatdir
251 $ rm -r thatdir
253
252
254 Demonstrate buggy behavior around requirements validation
253 Demonstrate buggy behavior around requirements validation
255 See comment in localrepo.py:makelocalrepository() for more.
254 See comment in localrepo.py:makelocalrepository() for more.
256
255
257 $ hg init sharenewrequires
256 $ hg init sharenewrequires
258 $ hg share sharenewrequires shareoldrequires
257 $ hg share sharenewrequires shareoldrequires
259 updating working directory
258 updating working directory
260 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
259 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
261
260
262 $ cat >> sharenewrequires/.hg/requires << EOF
261 $ cat >> sharenewrequires/.hg/requires << EOF
263 > missing-requirement
262 > missing-requirement
264 > EOF
263 > EOF
265
264
266 We cannot open the repo with the unknown requirement
265 We cannot open the repo with the unknown requirement
267
266
268 $ hg -R sharenewrequires status
267 $ hg -R sharenewrequires status
269 abort: repository requires features unknown to this Mercurial: missing-requirement
268 abort: repository requires features unknown to this Mercurial: missing-requirement
270 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
269 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
271 [255]
270 [255]
272
271
273 BUG: we don't get the same error when opening the shared repo pointing to it
272 BUG: we don't get the same error when opening the shared repo pointing to it
274
273
275 $ hg -R shareoldrequires status
274 $ hg -R shareoldrequires status
276
275
277 Explicitly kill daemons to let the test exit on Windows
276 Explicitly kill daemons to let the test exit on Windows
278
277
279 $ killdaemons.py
278 $ killdaemons.py
280
279
281 Test sharing a repository which was created with store requirement disable
280 Test sharing a repository which was created with store requirement disable
282
281
283 $ hg init nostore --config format.usestore=false
282 $ hg init nostore --config format.usestore=false
284 ignoring enabled 'format.use-share-safe' config because it is incompatible with disabled 'format.usestore' config (safe !)
283 ignoring enabled 'format.use-share-safe' config because it is incompatible with disabled 'format.usestore' config (safe !)
285 $ hg share nostore sharednostore
284 $ hg share nostore sharednostore
286 abort: cannot create shared repository as source was created with 'format.usestore' config disabled
285 abort: cannot create shared repository as source was created with 'format.usestore' config disabled
287 [255]
286 [255]
@@ -1,733 +1,733 b''
1 #testcases sshv1 sshv2
1 #testcases sshv1 sshv2
2
2
3 #if sshv2
3 #if sshv2
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [experimental]
5 > [experimental]
6 > sshpeer.advertise-v2 = true
6 > sshpeer.advertise-v2 = true
7 > sshserver.support-v2 = true
7 > sshserver.support-v2 = true
8 > EOF
8 > EOF
9 #endif
9 #endif
10
10
11 This test tries to exercise the ssh functionality with a dummy script
11 This test tries to exercise the ssh functionality with a dummy script
12
12
13 creating 'remote' repo
13 creating 'remote' repo
14
14
15 $ hg init remote
15 $ hg init remote
16 $ cd remote
16 $ cd remote
17 $ echo this > foo
17 $ echo this > foo
18 $ echo this > fooO
18 $ echo this > fooO
19 $ hg ci -A -m "init" foo fooO
19 $ hg ci -A -m "init" foo fooO
20
20
21 insert a closed branch (issue4428)
21 insert a closed branch (issue4428)
22
22
23 $ hg up null
23 $ hg up null
24 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
24 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
25 $ hg branch closed
25 $ hg branch closed
26 marked working directory as branch closed
26 marked working directory as branch closed
27 (branches are permanent and global, did you want a bookmark?)
27 (branches are permanent and global, did you want a bookmark?)
28 $ hg ci -mc0
28 $ hg ci -mc0
29 $ hg ci --close-branch -mc1
29 $ hg ci --close-branch -mc1
30 $ hg up -q default
30 $ hg up -q default
31
31
32 configure for serving
32 configure for serving
33
33
34 $ cat <<EOF > .hg/hgrc
34 $ cat <<EOF > .hg/hgrc
35 > [server]
35 > [server]
36 > uncompressed = True
36 > uncompressed = True
37 >
37 >
38 > [hooks]
38 > [hooks]
39 > changegroup = sh -c "printenv.py --line changegroup-in-remote 0 ../dummylog"
39 > changegroup = sh -c "printenv.py --line changegroup-in-remote 0 ../dummylog"
40 > EOF
40 > EOF
41 $ cd $TESTTMP
41 $ cd $TESTTMP
42
42
43 repo not found error
43 repo not found error
44
44
45 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
45 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
46 remote: abort: repository nonexistent not found
46 remote: abort: repository nonexistent not found
47 abort: no suitable response from remote hg
47 abort: no suitable response from remote hg
48 [255]
48 [255]
49 $ hg clone -q -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
49 $ hg clone -q -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
50 remote: abort: repository nonexistent not found
50 remote: abort: repository nonexistent not found
51 abort: no suitable response from remote hg
51 abort: no suitable response from remote hg
52 [255]
52 [255]
53
53
54 non-existent absolute path
54 non-existent absolute path
55
55
56 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/nonexistent local
56 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/nonexistent local
57 remote: abort: repository $TESTTMP/nonexistent not found
57 remote: abort: repository $TESTTMP/nonexistent not found
58 abort: no suitable response from remote hg
58 abort: no suitable response from remote hg
59 [255]
59 [255]
60
60
61 clone remote via stream
61 clone remote via stream
62
62
63 #if no-reposimplestore
63 #if no-reposimplestore
64
64
65 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
65 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
66 streaming all changes
66 streaming all changes
67 8 files to transfer, 827 bytes of data (no-zstd !)
67 8 files to transfer, 827 bytes of data (no-zstd !)
68 transferred 827 bytes in * seconds (*) (glob) (no-zstd !)
68 transferred 827 bytes in * seconds (*) (glob) (no-zstd !)
69 8 files to transfer, 846 bytes of data (zstd !)
69 8 files to transfer, 846 bytes of data (zstd !)
70 transferred * bytes in * seconds (* */sec) (glob) (zstd !)
70 transferred * bytes in * seconds (* */sec) (glob) (zstd !)
71 updating to branch default
71 updating to branch default
72 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
72 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
73 $ cd local-stream
73 $ cd local-stream
74 $ hg verify
74 $ hg verify
75 checking changesets
75 checking changesets
76 checking manifests
76 checking manifests
77 crosschecking files in changesets and manifests
77 crosschecking files in changesets and manifests
78 checking files
78 checking files
79 checked 3 changesets with 2 changes to 2 files
79 checked 3 changesets with 2 changes to 2 files
80 $ hg branches
80 $ hg branches
81 default 0:1160648e36ce
81 default 0:1160648e36ce
82 $ cd $TESTTMP
82 $ cd $TESTTMP
83
83
84 clone bookmarks via stream
84 clone bookmarks via stream
85
85
86 $ hg -R local-stream book mybook
86 $ hg -R local-stream book mybook
87 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
87 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
88 streaming all changes
88 streaming all changes
89 16 files to transfer, * of data (glob)
89 15 files to transfer, * of data (glob)
90 transferred * in * seconds (*) (glob)
90 transferred * in * seconds (*) (glob)
91 updating to branch default
91 updating to branch default
92 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
92 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
93 $ cd stream2
93 $ cd stream2
94 $ hg book
94 $ hg book
95 mybook 0:1160648e36ce
95 mybook 0:1160648e36ce
96 $ cd $TESTTMP
96 $ cd $TESTTMP
97 $ rm -rf local-stream stream2
97 $ rm -rf local-stream stream2
98
98
99 #endif
99 #endif
100
100
101 clone remote via pull
101 clone remote via pull
102
102
103 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
103 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
104 requesting all changes
104 requesting all changes
105 adding changesets
105 adding changesets
106 adding manifests
106 adding manifests
107 adding file changes
107 adding file changes
108 added 3 changesets with 2 changes to 2 files
108 added 3 changesets with 2 changes to 2 files
109 new changesets 1160648e36ce:ad076bfb429d
109 new changesets 1160648e36ce:ad076bfb429d
110 updating to branch default
110 updating to branch default
111 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
111 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
112
112
113 verify
113 verify
114
114
115 $ cd local
115 $ cd local
116 $ hg verify
116 $ hg verify
117 checking changesets
117 checking changesets
118 checking manifests
118 checking manifests
119 crosschecking files in changesets and manifests
119 crosschecking files in changesets and manifests
120 checking files
120 checking files
121 checked 3 changesets with 2 changes to 2 files
121 checked 3 changesets with 2 changes to 2 files
122 $ cat >> .hg/hgrc <<EOF
122 $ cat >> .hg/hgrc <<EOF
123 > [hooks]
123 > [hooks]
124 > changegroup = sh -c "printenv.py changegroup-in-local 0 ../dummylog"
124 > changegroup = sh -c "printenv.py changegroup-in-local 0 ../dummylog"
125 > EOF
125 > EOF
126
126
127 empty default pull
127 empty default pull
128
128
129 $ hg paths
129 $ hg paths
130 default = ssh://user@dummy/remote
130 default = ssh://user@dummy/remote
131 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
131 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
132 pulling from ssh://user@dummy/remote
132 pulling from ssh://user@dummy/remote
133 searching for changes
133 searching for changes
134 no changes found
134 no changes found
135
135
136 pull from wrong ssh URL
136 pull from wrong ssh URL
137
137
138 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist
138 $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist
139 pulling from ssh://user@dummy/doesnotexist
139 pulling from ssh://user@dummy/doesnotexist
140 remote: abort: repository doesnotexist not found
140 remote: abort: repository doesnotexist not found
141 abort: no suitable response from remote hg
141 abort: no suitable response from remote hg
142 [255]
142 [255]
143
143
144 local change
144 local change
145
145
146 $ echo bleah > foo
146 $ echo bleah > foo
147 $ hg ci -m "add"
147 $ hg ci -m "add"
148
148
149 updating rc
149 updating rc
150
150
151 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
151 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
152 $ echo "[ui]" >> .hg/hgrc
152 $ echo "[ui]" >> .hg/hgrc
153 $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc
153 $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc
154
154
155 find outgoing
155 find outgoing
156
156
157 $ hg out ssh://user@dummy/remote
157 $ hg out ssh://user@dummy/remote
158 comparing with ssh://user@dummy/remote
158 comparing with ssh://user@dummy/remote
159 searching for changes
159 searching for changes
160 changeset: 3:a28a9d1a809c
160 changeset: 3:a28a9d1a809c
161 tag: tip
161 tag: tip
162 parent: 0:1160648e36ce
162 parent: 0:1160648e36ce
163 user: test
163 user: test
164 date: Thu Jan 01 00:00:00 1970 +0000
164 date: Thu Jan 01 00:00:00 1970 +0000
165 summary: add
165 summary: add
166
166
167
167
168 find incoming on the remote side
168 find incoming on the remote side
169
169
170 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
170 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
171 comparing with ssh://user@dummy/local
171 comparing with ssh://user@dummy/local
172 searching for changes
172 searching for changes
173 changeset: 3:a28a9d1a809c
173 changeset: 3:a28a9d1a809c
174 tag: tip
174 tag: tip
175 parent: 0:1160648e36ce
175 parent: 0:1160648e36ce
176 user: test
176 user: test
177 date: Thu Jan 01 00:00:00 1970 +0000
177 date: Thu Jan 01 00:00:00 1970 +0000
178 summary: add
178 summary: add
179
179
180
180
181 find incoming on the remote side (using absolute path)
181 find incoming on the remote side (using absolute path)
182
182
183 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
183 $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
184 comparing with ssh://user@dummy/$TESTTMP/local
184 comparing with ssh://user@dummy/$TESTTMP/local
185 searching for changes
185 searching for changes
186 changeset: 3:a28a9d1a809c
186 changeset: 3:a28a9d1a809c
187 tag: tip
187 tag: tip
188 parent: 0:1160648e36ce
188 parent: 0:1160648e36ce
189 user: test
189 user: test
190 date: Thu Jan 01 00:00:00 1970 +0000
190 date: Thu Jan 01 00:00:00 1970 +0000
191 summary: add
191 summary: add
192
192
193
193
194 push
194 push
195
195
196 $ hg push
196 $ hg push
197 pushing to ssh://user@dummy/remote
197 pushing to ssh://user@dummy/remote
198 searching for changes
198 searching for changes
199 remote: adding changesets
199 remote: adding changesets
200 remote: adding manifests
200 remote: adding manifests
201 remote: adding file changes
201 remote: adding file changes
202 remote: added 1 changesets with 1 changes to 1 files
202 remote: added 1 changesets with 1 changes to 1 files
203 $ cd $TESTTMP/remote
203 $ cd $TESTTMP/remote
204
204
205 check remote tip
205 check remote tip
206
206
207 $ hg tip
207 $ hg tip
208 changeset: 3:a28a9d1a809c
208 changeset: 3:a28a9d1a809c
209 tag: tip
209 tag: tip
210 parent: 0:1160648e36ce
210 parent: 0:1160648e36ce
211 user: test
211 user: test
212 date: Thu Jan 01 00:00:00 1970 +0000
212 date: Thu Jan 01 00:00:00 1970 +0000
213 summary: add
213 summary: add
214
214
215 $ hg verify
215 $ hg verify
216 checking changesets
216 checking changesets
217 checking manifests
217 checking manifests
218 crosschecking files in changesets and manifests
218 crosschecking files in changesets and manifests
219 checking files
219 checking files
220 checked 4 changesets with 3 changes to 2 files
220 checked 4 changesets with 3 changes to 2 files
221 $ hg cat -r tip foo
221 $ hg cat -r tip foo
222 bleah
222 bleah
223 $ echo z > z
223 $ echo z > z
224 $ hg ci -A -m z z
224 $ hg ci -A -m z z
225 created new head
225 created new head
226
226
227 test pushkeys and bookmarks
227 test pushkeys and bookmarks
228
228
229 $ cd $TESTTMP/local
229 $ cd $TESTTMP/local
230 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
230 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
231 bookmarks
231 bookmarks
232 namespaces
232 namespaces
233 phases
233 phases
234 $ hg book foo -r 0
234 $ hg book foo -r 0
235 $ hg out -B --config paths.default=bogus://invalid --config paths.default:pushurl=`hg paths default`
235 $ hg out -B --config paths.default=bogus://invalid --config paths.default:pushurl=`hg paths default`
236 comparing with ssh://user@dummy/remote
236 comparing with ssh://user@dummy/remote
237 searching for changed bookmarks
237 searching for changed bookmarks
238 foo 1160648e36ce
238 foo 1160648e36ce
239 $ hg push -B foo
239 $ hg push -B foo
240 pushing to ssh://user@dummy/remote
240 pushing to ssh://user@dummy/remote
241 searching for changes
241 searching for changes
242 no changes found
242 no changes found
243 exporting bookmark foo
243 exporting bookmark foo
244 [1]
244 [1]
245 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
245 $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
246 foo 1160648e36cec0054048a7edc4110c6f84fde594
246 foo 1160648e36cec0054048a7edc4110c6f84fde594
247 $ hg book -f foo
247 $ hg book -f foo
248 $ hg push --traceback
248 $ hg push --traceback
249 pushing to ssh://user@dummy/remote
249 pushing to ssh://user@dummy/remote
250 searching for changes
250 searching for changes
251 no changes found
251 no changes found
252 updating bookmark foo
252 updating bookmark foo
253 [1]
253 [1]
254 $ hg book -d foo
254 $ hg book -d foo
255 $ hg in -B
255 $ hg in -B
256 comparing with ssh://user@dummy/remote
256 comparing with ssh://user@dummy/remote
257 searching for changed bookmarks
257 searching for changed bookmarks
258 foo a28a9d1a809c
258 foo a28a9d1a809c
259 $ hg book -f -r 0 foo
259 $ hg book -f -r 0 foo
260 $ hg pull -B foo
260 $ hg pull -B foo
261 pulling from ssh://user@dummy/remote
261 pulling from ssh://user@dummy/remote
262 no changes found
262 no changes found
263 updating bookmark foo
263 updating bookmark foo
264 $ hg book -d foo
264 $ hg book -d foo
265 $ hg push -B foo
265 $ hg push -B foo
266 pushing to ssh://user@dummy/remote
266 pushing to ssh://user@dummy/remote
267 searching for changes
267 searching for changes
268 no changes found
268 no changes found
269 deleting remote bookmark foo
269 deleting remote bookmark foo
270 [1]
270 [1]
271
271
272 a bad, evil hook that prints to stdout
272 a bad, evil hook that prints to stdout
273
273
274 $ cat <<EOF > $TESTTMP/badhook
274 $ cat <<EOF > $TESTTMP/badhook
275 > import sys
275 > import sys
276 > sys.stdout.write("KABOOM\n")
276 > sys.stdout.write("KABOOM\n")
277 > sys.stdout.flush()
277 > sys.stdout.flush()
278 > EOF
278 > EOF
279
279
280 $ cat <<EOF > $TESTTMP/badpyhook.py
280 $ cat <<EOF > $TESTTMP/badpyhook.py
281 > import sys
281 > import sys
282 > def hook(ui, repo, hooktype, **kwargs):
282 > def hook(ui, repo, hooktype, **kwargs):
283 > sys.stdout.write("KABOOM IN PROCESS\n")
283 > sys.stdout.write("KABOOM IN PROCESS\n")
284 > sys.stdout.flush()
284 > sys.stdout.flush()
285 > EOF
285 > EOF
286
286
287 $ cat <<EOF >> ../remote/.hg/hgrc
287 $ cat <<EOF >> ../remote/.hg/hgrc
288 > [hooks]
288 > [hooks]
289 > changegroup.stdout = "$PYTHON" $TESTTMP/badhook
289 > changegroup.stdout = "$PYTHON" $TESTTMP/badhook
290 > changegroup.pystdout = python:$TESTTMP/badpyhook.py:hook
290 > changegroup.pystdout = python:$TESTTMP/badpyhook.py:hook
291 > EOF
291 > EOF
292 $ echo r > r
292 $ echo r > r
293 $ hg ci -A -m z r
293 $ hg ci -A -m z r
294
294
295 push should succeed even though it has an unexpected response
295 push should succeed even though it has an unexpected response
296
296
297 $ hg push
297 $ hg push
298 pushing to ssh://user@dummy/remote
298 pushing to ssh://user@dummy/remote
299 searching for changes
299 searching for changes
300 remote has heads on branch 'default' that are not known locally: 6c0482d977a3
300 remote has heads on branch 'default' that are not known locally: 6c0482d977a3
301 remote: adding changesets
301 remote: adding changesets
302 remote: adding manifests
302 remote: adding manifests
303 remote: adding file changes
303 remote: adding file changes
304 remote: added 1 changesets with 1 changes to 1 files
304 remote: added 1 changesets with 1 changes to 1 files
305 remote: KABOOM
305 remote: KABOOM
306 remote: KABOOM IN PROCESS
306 remote: KABOOM IN PROCESS
307 $ hg -R ../remote heads
307 $ hg -R ../remote heads
308 changeset: 5:1383141674ec
308 changeset: 5:1383141674ec
309 tag: tip
309 tag: tip
310 parent: 3:a28a9d1a809c
310 parent: 3:a28a9d1a809c
311 user: test
311 user: test
312 date: Thu Jan 01 00:00:00 1970 +0000
312 date: Thu Jan 01 00:00:00 1970 +0000
313 summary: z
313 summary: z
314
314
315 changeset: 4:6c0482d977a3
315 changeset: 4:6c0482d977a3
316 parent: 0:1160648e36ce
316 parent: 0:1160648e36ce
317 user: test
317 user: test
318 date: Thu Jan 01 00:00:00 1970 +0000
318 date: Thu Jan 01 00:00:00 1970 +0000
319 summary: z
319 summary: z
320
320
321
321
322 #if chg
322 #if chg
323
323
324 try again with remote chg, which should succeed as well
324 try again with remote chg, which should succeed as well
325
325
326 $ hg rollback -R ../remote
326 $ hg rollback -R ../remote
327 repository tip rolled back to revision 4 (undo serve)
327 repository tip rolled back to revision 4 (undo serve)
328
328
329 $ hg push --config ui.remotecmd=chg
329 $ hg push --config ui.remotecmd=chg
330 pushing to ssh://user@dummy/remote
330 pushing to ssh://user@dummy/remote
331 searching for changes
331 searching for changes
332 remote has heads on branch 'default' that are not known locally: 6c0482d977a3
332 remote has heads on branch 'default' that are not known locally: 6c0482d977a3
333 remote: adding changesets
333 remote: adding changesets
334 remote: adding manifests
334 remote: adding manifests
335 remote: adding file changes
335 remote: adding file changes
336 remote: added 1 changesets with 1 changes to 1 files (py3 !)
336 remote: added 1 changesets with 1 changes to 1 files (py3 !)
337 remote: KABOOM
337 remote: KABOOM
338 remote: KABOOM IN PROCESS
338 remote: KABOOM IN PROCESS
339 remote: added 1 changesets with 1 changes to 1 files (no-py3 !)
339 remote: added 1 changesets with 1 changes to 1 files (no-py3 !)
340
340
341 #endif
341 #endif
342
342
343 clone bookmarks
343 clone bookmarks
344
344
345 $ hg -R ../remote bookmark test
345 $ hg -R ../remote bookmark test
346 $ hg -R ../remote bookmarks
346 $ hg -R ../remote bookmarks
347 * test 4:6c0482d977a3
347 * test 4:6c0482d977a3
348 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
348 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
349 requesting all changes
349 requesting all changes
350 adding changesets
350 adding changesets
351 adding manifests
351 adding manifests
352 adding file changes
352 adding file changes
353 added 6 changesets with 5 changes to 4 files (+1 heads)
353 added 6 changesets with 5 changes to 4 files (+1 heads)
354 new changesets 1160648e36ce:1383141674ec
354 new changesets 1160648e36ce:1383141674ec
355 updating to branch default
355 updating to branch default
356 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
356 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
357 $ hg -R local-bookmarks bookmarks
357 $ hg -R local-bookmarks bookmarks
358 test 4:6c0482d977a3
358 test 4:6c0482d977a3
359
359
360 passwords in ssh urls are not supported
360 passwords in ssh urls are not supported
361 (we use a glob here because different Python versions give different
361 (we use a glob here because different Python versions give different
362 results here)
362 results here)
363
363
364 $ hg push ssh://user:erroneouspwd@dummy/remote
364 $ hg push ssh://user:erroneouspwd@dummy/remote
365 pushing to ssh://user:*@dummy/remote (glob)
365 pushing to ssh://user:*@dummy/remote (glob)
366 abort: password in URL not supported
366 abort: password in URL not supported
367 [255]
367 [255]
368
368
369 $ cd $TESTTMP
369 $ cd $TESTTMP
370
370
371 hide outer repo
371 hide outer repo
372 $ hg init
372 $ hg init
373
373
374 Test remote paths with spaces (issue2983):
374 Test remote paths with spaces (issue2983):
375
375
376 $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
376 $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
377 $ touch "$TESTTMP/a repo/test"
377 $ touch "$TESTTMP/a repo/test"
378 $ hg -R 'a repo' commit -A -m "test"
378 $ hg -R 'a repo' commit -A -m "test"
379 adding test
379 adding test
380 $ hg -R 'a repo' tag tag
380 $ hg -R 'a repo' tag tag
381 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
381 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
382 73649e48688a
382 73649e48688a
383
383
384 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
384 $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
385 abort: unknown revision 'noNoNO'
385 abort: unknown revision 'noNoNO'
386 [255]
386 [255]
387
387
388 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
388 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
389
389
390 $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
390 $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
391 destination directory: a repo
391 destination directory: a repo
392 abort: destination 'a repo' is not empty
392 abort: destination 'a repo' is not empty
393 [10]
393 [10]
394
394
395 #if no-rhg
395 #if no-rhg
396 Make sure hg is really paranoid in serve --stdio mode. It used to be
396 Make sure hg is really paranoid in serve --stdio mode. It used to be
397 possible to get a debugger REPL by specifying a repo named --debugger.
397 possible to get a debugger REPL by specifying a repo named --debugger.
398 $ hg -R --debugger serve --stdio
398 $ hg -R --debugger serve --stdio
399 abort: potentially unsafe serve --stdio invocation: ['-R', '--debugger', 'serve', '--stdio']
399 abort: potentially unsafe serve --stdio invocation: ['-R', '--debugger', 'serve', '--stdio']
400 [255]
400 [255]
401 $ hg -R --config=ui.debugger=yes serve --stdio
401 $ hg -R --config=ui.debugger=yes serve --stdio
402 abort: potentially unsafe serve --stdio invocation: ['-R', '--config=ui.debugger=yes', 'serve', '--stdio']
402 abort: potentially unsafe serve --stdio invocation: ['-R', '--config=ui.debugger=yes', 'serve', '--stdio']
403 [255]
403 [255]
404 Abbreviations of 'serve' also don't work, to avoid shenanigans.
404 Abbreviations of 'serve' also don't work, to avoid shenanigans.
405 $ hg -R narf serv --stdio
405 $ hg -R narf serv --stdio
406 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
406 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
407 [255]
407 [255]
408 #else
408 #else
409 rhg aborts early on -R without a repository at that path
409 rhg aborts early on -R without a repository at that path
410 $ hg -R --debugger serve --stdio
410 $ hg -R --debugger serve --stdio
411 abort: potentially unsafe serve --stdio invocation: ['-R', '--debugger', 'serve', '--stdio'] (missing-correct-output !)
411 abort: potentially unsafe serve --stdio invocation: ['-R', '--debugger', 'serve', '--stdio'] (missing-correct-output !)
412 abort: repository --debugger not found (known-bad-output !)
412 abort: repository --debugger not found (known-bad-output !)
413 [255]
413 [255]
414 $ hg -R --config=ui.debugger=yes serve --stdio
414 $ hg -R --config=ui.debugger=yes serve --stdio
415 abort: potentially unsafe serve --stdio invocation: ['-R', '--config=ui.debugger=yes', 'serve', '--stdio'] (missing-correct-output !)
415 abort: potentially unsafe serve --stdio invocation: ['-R', '--config=ui.debugger=yes', 'serve', '--stdio'] (missing-correct-output !)
416 abort: repository --config=ui.debugger=yes not found (known-bad-output !)
416 abort: repository --config=ui.debugger=yes not found (known-bad-output !)
417 [255]
417 [255]
418 $ hg -R narf serv --stdio
418 $ hg -R narf serv --stdio
419 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio'] (missing-correct-output !)
419 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio'] (missing-correct-output !)
420 abort: repository narf not found (known-bad-output !)
420 abort: repository narf not found (known-bad-output !)
421 [255]
421 [255]
422 If the repo does exist, rhg finds an unsupported command and falls back to Python
422 If the repo does exist, rhg finds an unsupported command and falls back to Python
423 which still does the right thing
423 which still does the right thing
424 $ hg init narf
424 $ hg init narf
425 $ hg -R narf serv --stdio
425 $ hg -R narf serv --stdio
426 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
426 abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
427 [255]
427 [255]
428 #endif
428 #endif
429
429
430 Test hg-ssh using a helper script that will restore PYTHONPATH (which might
430 Test hg-ssh using a helper script that will restore PYTHONPATH (which might
431 have been cleared by a hg.exe wrapper) and invoke hg-ssh with the right
431 have been cleared by a hg.exe wrapper) and invoke hg-ssh with the right
432 parameters:
432 parameters:
433
433
434 $ cat > ssh.sh << EOF
434 $ cat > ssh.sh << EOF
435 > userhost="\$1"
435 > userhost="\$1"
436 > SSH_ORIGINAL_COMMAND="\$2"
436 > SSH_ORIGINAL_COMMAND="\$2"
437 > export SSH_ORIGINAL_COMMAND
437 > export SSH_ORIGINAL_COMMAND
438 > PYTHONPATH="$PYTHONPATH"
438 > PYTHONPATH="$PYTHONPATH"
439 > export PYTHONPATH
439 > export PYTHONPATH
440 > "$PYTHON" "$TESTDIR/../contrib/hg-ssh" "$TESTTMP/a repo"
440 > "$PYTHON" "$TESTDIR/../contrib/hg-ssh" "$TESTTMP/a repo"
441 > EOF
441 > EOF
442
442
443 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a repo"
443 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a repo"
444 73649e48688a
444 73649e48688a
445
445
446 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a'repo"
446 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a'repo"
447 remote: Illegal repository "$TESTTMP/a'repo"
447 remote: Illegal repository "$TESTTMP/a'repo"
448 abort: no suitable response from remote hg
448 abort: no suitable response from remote hg
449 [255]
449 [255]
450
450
451 $ hg id --ssh "sh ssh.sh" --remotecmd hacking "ssh://user@dummy/a'repo"
451 $ hg id --ssh "sh ssh.sh" --remotecmd hacking "ssh://user@dummy/a'repo"
452 remote: Illegal command "hacking -R 'a'\''repo' serve --stdio"
452 remote: Illegal command "hacking -R 'a'\''repo' serve --stdio"
453 abort: no suitable response from remote hg
453 abort: no suitable response from remote hg
454 [255]
454 [255]
455
455
456 $ SSH_ORIGINAL_COMMAND="'hg' -R 'a'repo' serve --stdio" "$PYTHON" "$TESTDIR/../contrib/hg-ssh"
456 $ SSH_ORIGINAL_COMMAND="'hg' -R 'a'repo' serve --stdio" "$PYTHON" "$TESTDIR/../contrib/hg-ssh"
457 Illegal command "'hg' -R 'a'repo' serve --stdio": No closing quotation
457 Illegal command "'hg' -R 'a'repo' serve --stdio": No closing quotation
458 [255]
458 [255]
459
459
460 Test hg-ssh in read-only mode:
460 Test hg-ssh in read-only mode:
461
461
462 $ cat > ssh.sh << EOF
462 $ cat > ssh.sh << EOF
463 > userhost="\$1"
463 > userhost="\$1"
464 > SSH_ORIGINAL_COMMAND="\$2"
464 > SSH_ORIGINAL_COMMAND="\$2"
465 > export SSH_ORIGINAL_COMMAND
465 > export SSH_ORIGINAL_COMMAND
466 > PYTHONPATH="$PYTHONPATH"
466 > PYTHONPATH="$PYTHONPATH"
467 > export PYTHONPATH
467 > export PYTHONPATH
468 > "$PYTHON" "$TESTDIR/../contrib/hg-ssh" --read-only "$TESTTMP/remote"
468 > "$PYTHON" "$TESTDIR/../contrib/hg-ssh" --read-only "$TESTTMP/remote"
469 > EOF
469 > EOF
470
470
471 $ hg clone --ssh "sh ssh.sh" "ssh://user@dummy/$TESTTMP/remote" read-only-local
471 $ hg clone --ssh "sh ssh.sh" "ssh://user@dummy/$TESTTMP/remote" read-only-local
472 requesting all changes
472 requesting all changes
473 adding changesets
473 adding changesets
474 adding manifests
474 adding manifests
475 adding file changes
475 adding file changes
476 added 6 changesets with 5 changes to 4 files (+1 heads)
476 added 6 changesets with 5 changes to 4 files (+1 heads)
477 new changesets 1160648e36ce:1383141674ec
477 new changesets 1160648e36ce:1383141674ec
478 updating to branch default
478 updating to branch default
479 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
479 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
480
480
481 $ cd read-only-local
481 $ cd read-only-local
482 $ echo "baz" > bar
482 $ echo "baz" > bar
483 $ hg ci -A -m "unpushable commit" bar
483 $ hg ci -A -m "unpushable commit" bar
484 $ hg push --ssh "sh ../ssh.sh"
484 $ hg push --ssh "sh ../ssh.sh"
485 pushing to ssh://user@dummy/*/remote (glob)
485 pushing to ssh://user@dummy/*/remote (glob)
486 searching for changes
486 searching for changes
487 remote: Permission denied
487 remote: Permission denied
488 remote: pretxnopen.hg-ssh hook failed
488 remote: pretxnopen.hg-ssh hook failed
489 abort: push failed on remote
489 abort: push failed on remote
490 [100]
490 [100]
491
491
492 $ cd $TESTTMP
492 $ cd $TESTTMP
493
493
494 stderr from remote commands should be printed before stdout from local code (issue4336)
494 stderr from remote commands should be printed before stdout from local code (issue4336)
495
495
496 $ hg clone remote stderr-ordering
496 $ hg clone remote stderr-ordering
497 updating to branch default
497 updating to branch default
498 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
498 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
499 $ cd stderr-ordering
499 $ cd stderr-ordering
500 $ cat >> localwrite.py << EOF
500 $ cat >> localwrite.py << EOF
501 > from mercurial import exchange, extensions
501 > from mercurial import exchange, extensions
502 >
502 >
503 > def wrappedpush(orig, repo, *args, **kwargs):
503 > def wrappedpush(orig, repo, *args, **kwargs):
504 > res = orig(repo, *args, **kwargs)
504 > res = orig(repo, *args, **kwargs)
505 > repo.ui.write(b'local stdout\n')
505 > repo.ui.write(b'local stdout\n')
506 > repo.ui.flush()
506 > repo.ui.flush()
507 > return res
507 > return res
508 >
508 >
509 > def extsetup(ui):
509 > def extsetup(ui):
510 > extensions.wrapfunction(exchange, b'push', wrappedpush)
510 > extensions.wrapfunction(exchange, b'push', wrappedpush)
511 > EOF
511 > EOF
512
512
513 $ cat >> .hg/hgrc << EOF
513 $ cat >> .hg/hgrc << EOF
514 > [paths]
514 > [paths]
515 > default-push = ssh://user@dummy/remote
515 > default-push = ssh://user@dummy/remote
516 > [ui]
516 > [ui]
517 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
517 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
518 > [extensions]
518 > [extensions]
519 > localwrite = localwrite.py
519 > localwrite = localwrite.py
520 > EOF
520 > EOF
521
521
522 $ echo localwrite > foo
522 $ echo localwrite > foo
523 $ hg commit -m 'testing localwrite'
523 $ hg commit -m 'testing localwrite'
524 $ hg push
524 $ hg push
525 pushing to ssh://user@dummy/remote
525 pushing to ssh://user@dummy/remote
526 searching for changes
526 searching for changes
527 remote: adding changesets
527 remote: adding changesets
528 remote: adding manifests
528 remote: adding manifests
529 remote: adding file changes
529 remote: adding file changes
530 remote: added 1 changesets with 1 changes to 1 files
530 remote: added 1 changesets with 1 changes to 1 files
531 remote: KABOOM
531 remote: KABOOM
532 remote: KABOOM IN PROCESS
532 remote: KABOOM IN PROCESS
533 local stdout
533 local stdout
534
534
535 debug output
535 debug output
536
536
537 $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
537 $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
538 pulling from ssh://user@dummy/remote
538 pulling from ssh://user@dummy/remote
539 running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
539 running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
540 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
540 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
541 devel-peer-request: hello+between
541 devel-peer-request: hello+between
542 devel-peer-request: pairs: 81 bytes
542 devel-peer-request: pairs: 81 bytes
543 sending hello command
543 sending hello command
544 sending between command
544 sending between command
545 remote: 444 (sshv1 no-rust !)
545 remote: 444 (sshv1 no-rust !)
546 remote: 463 (sshv1 rust !)
546 remote: 463 (sshv1 rust !)
547 protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
547 protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
548 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-rust !)
548 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-rust !)
549 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (rust !)
549 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (rust !)
550 remote: 1 (sshv1 !)
550 remote: 1 (sshv1 !)
551 devel-peer-request: protocaps
551 devel-peer-request: protocaps
552 devel-peer-request: caps: * bytes (glob)
552 devel-peer-request: caps: * bytes (glob)
553 sending protocaps command
553 sending protocaps command
554 query 1; heads
554 query 1; heads
555 devel-peer-request: batched-content
555 devel-peer-request: batched-content
556 devel-peer-request: - heads (0 arguments)
556 devel-peer-request: - heads (0 arguments)
557 devel-peer-request: - known (1 arguments)
557 devel-peer-request: - known (1 arguments)
558 devel-peer-request: batch
558 devel-peer-request: batch
559 devel-peer-request: cmds: 141 bytes
559 devel-peer-request: cmds: 141 bytes
560 sending batch command
560 sending batch command
561 searching for changes
561 searching for changes
562 all remote heads known locally
562 all remote heads known locally
563 no changes found
563 no changes found
564 devel-peer-request: getbundle
564 devel-peer-request: getbundle
565 devel-peer-request: bookmarks: 1 bytes
565 devel-peer-request: bookmarks: 1 bytes
566 devel-peer-request: bundlecaps: 270 bytes
566 devel-peer-request: bundlecaps: 270 bytes
567 devel-peer-request: cg: 1 bytes
567 devel-peer-request: cg: 1 bytes
568 devel-peer-request: common: 122 bytes
568 devel-peer-request: common: 122 bytes
569 devel-peer-request: heads: 122 bytes
569 devel-peer-request: heads: 122 bytes
570 devel-peer-request: listkeys: 9 bytes
570 devel-peer-request: listkeys: 9 bytes
571 devel-peer-request: phases: 1 bytes
571 devel-peer-request: phases: 1 bytes
572 sending getbundle command
572 sending getbundle command
573 bundle2-input-bundle: with-transaction
573 bundle2-input-bundle: with-transaction
574 bundle2-input-part: "bookmarks" supported
574 bundle2-input-part: "bookmarks" supported
575 bundle2-input-part: total payload size 26
575 bundle2-input-part: total payload size 26
576 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
576 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
577 bundle2-input-part: total payload size 45
577 bundle2-input-part: total payload size 45
578 bundle2-input-part: "phase-heads" supported
578 bundle2-input-part: "phase-heads" supported
579 bundle2-input-part: total payload size 72
579 bundle2-input-part: total payload size 72
580 bundle2-input-bundle: 3 parts total
580 bundle2-input-bundle: 3 parts total
581 checking for updated bookmarks
581 checking for updated bookmarks
582
582
583 $ cd $TESTTMP
583 $ cd $TESTTMP
584
584
585 $ cat dummylog
585 $ cat dummylog
586 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
586 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
587 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
587 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
588 Got arguments 1:user@dummy 2:hg -R $TESTTMP/nonexistent serve --stdio
588 Got arguments 1:user@dummy 2:hg -R $TESTTMP/nonexistent serve --stdio
589 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
589 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
590 Got arguments 1:user@dummy 2:hg -R local-stream serve --stdio (no-reposimplestore !)
590 Got arguments 1:user@dummy 2:hg -R local-stream serve --stdio (no-reposimplestore !)
591 Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
591 Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
592 Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
592 Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
593 Got arguments 1:user@dummy 2:hg -R doesnotexist serve --stdio
593 Got arguments 1:user@dummy 2:hg -R doesnotexist serve --stdio
594 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
594 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
595 Got arguments 1:user@dummy 2:hg -R local serve --stdio
595 Got arguments 1:user@dummy 2:hg -R local serve --stdio
596 Got arguments 1:user@dummy 2:hg -R $TESTTMP/local serve --stdio
596 Got arguments 1:user@dummy 2:hg -R $TESTTMP/local serve --stdio
597 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
597 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
598 changegroup-in-remote hook: HG_BUNDLE2=1
598 changegroup-in-remote hook: HG_BUNDLE2=1
599 HG_HOOKNAME=changegroup
599 HG_HOOKNAME=changegroup
600 HG_HOOKTYPE=changegroup
600 HG_HOOKTYPE=changegroup
601 HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60
601 HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60
602 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60
602 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60
603 HG_SOURCE=serve
603 HG_SOURCE=serve
604 HG_TXNID=TXN:$ID$
604 HG_TXNID=TXN:$ID$
605 HG_TXNNAME=serve
605 HG_TXNNAME=serve
606 HG_URL=remote:ssh:$LOCALIP
606 HG_URL=remote:ssh:$LOCALIP
607
607
608 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
608 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
609 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
609 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
610 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
610 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
611 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
611 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
612 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
612 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
613 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
613 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
614 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
614 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
615 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
615 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
616 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
616 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
617 changegroup-in-remote hook: HG_BUNDLE2=1
617 changegroup-in-remote hook: HG_BUNDLE2=1
618 HG_HOOKNAME=changegroup
618 HG_HOOKNAME=changegroup
619 HG_HOOKTYPE=changegroup
619 HG_HOOKTYPE=changegroup
620 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6
620 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6
621 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6
621 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6
622 HG_SOURCE=serve
622 HG_SOURCE=serve
623 HG_TXNID=TXN:$ID$
623 HG_TXNID=TXN:$ID$
624 HG_TXNNAME=serve
624 HG_TXNNAME=serve
625 HG_URL=remote:ssh:$LOCALIP
625 HG_URL=remote:ssh:$LOCALIP
626
626
627 Got arguments 1:user@dummy 2:chg -R remote serve --stdio (chg !)
627 Got arguments 1:user@dummy 2:chg -R remote serve --stdio (chg !)
628 changegroup-in-remote hook: HG_BUNDLE2=1 (chg !)
628 changegroup-in-remote hook: HG_BUNDLE2=1 (chg !)
629 HG_HOOKNAME=changegroup (chg !)
629 HG_HOOKNAME=changegroup (chg !)
630 HG_HOOKTYPE=changegroup (chg !)
630 HG_HOOKTYPE=changegroup (chg !)
631 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 (chg !)
631 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 (chg !)
632 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 (chg !)
632 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 (chg !)
633 HG_SOURCE=serve (chg !)
633 HG_SOURCE=serve (chg !)
634 HG_TXNID=TXN:$ID$ (chg !)
634 HG_TXNID=TXN:$ID$ (chg !)
635 HG_TXNNAME=serve (chg !)
635 HG_TXNNAME=serve (chg !)
636 HG_URL=remote:ssh:$LOCALIP (chg !)
636 HG_URL=remote:ssh:$LOCALIP (chg !)
637 (chg !)
637 (chg !)
638 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
638 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
639 Got arguments 1:user@dummy 2:hg init 'a repo'
639 Got arguments 1:user@dummy 2:hg init 'a repo'
640 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
640 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
641 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
641 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
642 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
642 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
643 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
643 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
644 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
644 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
645 changegroup-in-remote hook: HG_BUNDLE2=1
645 changegroup-in-remote hook: HG_BUNDLE2=1
646 HG_HOOKNAME=changegroup
646 HG_HOOKNAME=changegroup
647 HG_HOOKTYPE=changegroup
647 HG_HOOKTYPE=changegroup
648 HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8
648 HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8
649 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8
649 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8
650 HG_SOURCE=serve
650 HG_SOURCE=serve
651 HG_TXNID=TXN:$ID$
651 HG_TXNID=TXN:$ID$
652 HG_TXNNAME=serve
652 HG_TXNNAME=serve
653 HG_URL=remote:ssh:$LOCALIP
653 HG_URL=remote:ssh:$LOCALIP
654
654
655 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
655 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
656
656
657
657
658 remote hook failure is attributed to remote
658 remote hook failure is attributed to remote
659
659
660 $ cat > $TESTTMP/failhook << EOF
660 $ cat > $TESTTMP/failhook << EOF
661 > def hook(ui, repo, **kwargs):
661 > def hook(ui, repo, **kwargs):
662 > ui.write(b'hook failure!\n')
662 > ui.write(b'hook failure!\n')
663 > ui.flush()
663 > ui.flush()
664 > return 1
664 > return 1
665 > EOF
665 > EOF
666
666
667 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
667 $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
668
668
669 $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout
669 $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout
670 $ cd hookout
670 $ cd hookout
671 $ touch hookfailure
671 $ touch hookfailure
672 $ hg -q commit -A -m 'remote hook failure'
672 $ hg -q commit -A -m 'remote hook failure'
673 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push
673 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push
674 pushing to ssh://user@dummy/remote
674 pushing to ssh://user@dummy/remote
675 searching for changes
675 searching for changes
676 remote: adding changesets
676 remote: adding changesets
677 remote: adding manifests
677 remote: adding manifests
678 remote: adding file changes
678 remote: adding file changes
679 remote: hook failure!
679 remote: hook failure!
680 remote: transaction abort!
680 remote: transaction abort!
681 remote: rollback completed
681 remote: rollback completed
682 remote: pretxnchangegroup.fail hook failed
682 remote: pretxnchangegroup.fail hook failed
683 abort: push failed on remote
683 abort: push failed on remote
684 [100]
684 [100]
685
685
686 abort during pull is properly reported as such
686 abort during pull is properly reported as such
687
687
688 $ echo morefoo >> ../remote/foo
688 $ echo morefoo >> ../remote/foo
689 $ hg -R ../remote commit --message "more foo to be pulled"
689 $ hg -R ../remote commit --message "more foo to be pulled"
690 $ cat >> ../remote/.hg/hgrc << EOF
690 $ cat >> ../remote/.hg/hgrc << EOF
691 > [extensions]
691 > [extensions]
692 > crash = ${TESTDIR}/crashgetbundler.py
692 > crash = ${TESTDIR}/crashgetbundler.py
693 > EOF
693 > EOF
694 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull
694 $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull
695 pulling from ssh://user@dummy/remote
695 pulling from ssh://user@dummy/remote
696 searching for changes
696 searching for changes
697 remote: abort: this is an exercise
697 remote: abort: this is an exercise
698 abort: pull failed on remote
698 abort: pull failed on remote
699 [100]
699 [100]
700
700
701 abort with no error hint when there is a ssh problem when pulling
701 abort with no error hint when there is a ssh problem when pulling
702
702
703 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
703 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
704 pulling from ssh://brokenrepository/
704 pulling from ssh://brokenrepository/
705 abort: no suitable response from remote hg
705 abort: no suitable response from remote hg
706 [255]
706 [255]
707
707
708 abort with configured error hint when there is a ssh problem when pulling
708 abort with configured error hint when there is a ssh problem when pulling
709
709
710 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \
710 $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \
711 > --config ui.ssherrorhint="Please see http://company/internalwiki/ssh.html"
711 > --config ui.ssherrorhint="Please see http://company/internalwiki/ssh.html"
712 pulling from ssh://brokenrepository/
712 pulling from ssh://brokenrepository/
713 abort: no suitable response from remote hg
713 abort: no suitable response from remote hg
714 (Please see http://company/internalwiki/ssh.html)
714 (Please see http://company/internalwiki/ssh.html)
715 [255]
715 [255]
716
716
717 test that custom environment is passed down to ssh executable
717 test that custom environment is passed down to ssh executable
718 $ cat >>dumpenv <<EOF
718 $ cat >>dumpenv <<EOF
719 > #! /bin/sh
719 > #! /bin/sh
720 > echo \$VAR >&2
720 > echo \$VAR >&2
721 > EOF
721 > EOF
722 $ chmod +x dumpenv
722 $ chmod +x dumpenv
723 $ hg pull ssh://something --config ui.ssh="sh dumpenv"
723 $ hg pull ssh://something --config ui.ssh="sh dumpenv"
724 pulling from ssh://something/
724 pulling from ssh://something/
725 remote:
725 remote:
726 abort: no suitable response from remote hg
726 abort: no suitable response from remote hg
727 [255]
727 [255]
728 $ hg pull ssh://something --config ui.ssh="sh dumpenv" --config sshenv.VAR=17
728 $ hg pull ssh://something --config ui.ssh="sh dumpenv" --config sshenv.VAR=17
729 pulling from ssh://something/
729 pulling from ssh://something/
730 remote: 17
730 remote: 17
731 abort: no suitable response from remote hg
731 abort: no suitable response from remote hg
732 [255]
732 [255]
733
733
@@ -1,935 +1,935 b''
1 setup
1 setup
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > blackbox=
5 > blackbox=
6 > mock=$TESTDIR/mockblackbox.py
6 > mock=$TESTDIR/mockblackbox.py
7 > [blackbox]
7 > [blackbox]
8 > track = command, commandfinish, tagscache
8 > track = command, commandfinish, tagscache
9 > EOF
9 > EOF
10
10
11 Helper functions:
11 Helper functions:
12
12
13 $ cacheexists() {
13 $ cacheexists() {
14 > [ -f .hg/cache/tags2-visible ] && echo "tag cache exists" || echo "no tag cache"
14 > [ -f .hg/cache/tags2-visible ] && echo "tag cache exists" || echo "no tag cache"
15 > }
15 > }
16
16
17 $ fnodescacheexists() {
17 $ fnodescacheexists() {
18 > [ -f .hg/cache/hgtagsfnodes1 ] && echo "fnodes cache exists" || echo "no fnodes cache"
18 > [ -f .hg/cache/hgtagsfnodes1 ] && echo "fnodes cache exists" || echo "no fnodes cache"
19 > }
19 > }
20
20
21 $ dumptags() {
21 $ dumptags() {
22 > rev=$1
22 > rev=$1
23 > echo "rev $rev: .hgtags:"
23 > echo "rev $rev: .hgtags:"
24 > hg cat -r$rev .hgtags
24 > hg cat -r$rev .hgtags
25 > }
25 > }
26
26
27 # XXX need to test that the tag cache works when we strip an old head
27 # XXX need to test that the tag cache works when we strip an old head
28 # and add a new one rooted off non-tip: i.e. node and rev of tip are the
28 # and add a new one rooted off non-tip: i.e. node and rev of tip are the
29 # same, but stuff has changed behind tip.
29 # same, but stuff has changed behind tip.
30
30
31 Setup:
31 Setup:
32
32
33 $ hg init t
33 $ hg init t
34 $ cd t
34 $ cd t
35 $ cacheexists
35 $ cacheexists
36 no tag cache
36 no tag cache
37 $ fnodescacheexists
37 $ fnodescacheexists
38 no fnodes cache
38 no fnodes cache
39 $ hg id
39 $ hg id
40 000000000000 tip
40 000000000000 tip
41 $ cacheexists
41 $ cacheexists
42 no tag cache
42 no tag cache
43 $ fnodescacheexists
43 $ fnodescacheexists
44 no fnodes cache
44 no fnodes cache
45 $ echo a > a
45 $ echo a > a
46 $ hg add a
46 $ hg add a
47 $ hg commit -m "test"
47 $ hg commit -m "test"
48 $ hg co
48 $ hg co
49 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
50 $ hg identify
50 $ hg identify
51 acb14030fe0a tip
51 acb14030fe0a tip
52 $ hg identify -r 'wdir()'
52 $ hg identify -r 'wdir()'
53 acb14030fe0a tip
53 acb14030fe0a tip
54 $ cacheexists
54 $ cacheexists
55 tag cache exists
55 tag cache exists
56 No fnodes cache because .hgtags file doesn't exist
56 No fnodes cache because .hgtags file doesn't exist
57 (this is an implementation detail)
57 (this is an implementation detail)
58 $ fnodescacheexists
58 $ fnodescacheexists
59 no fnodes cache
59 no fnodes cache
60
60
61 Try corrupting the cache
61 Try corrupting the cache
62
62
63 $ printf 'a b' > .hg/cache/tags2-visible
63 $ printf 'a b' > .hg/cache/tags2-visible
64 $ hg identify
64 $ hg identify
65 acb14030fe0a tip
65 acb14030fe0a tip
66 $ cacheexists
66 $ cacheexists
67 tag cache exists
67 tag cache exists
68 $ fnodescacheexists
68 $ fnodescacheexists
69 no fnodes cache
69 no fnodes cache
70 $ hg identify
70 $ hg identify
71 acb14030fe0a tip
71 acb14030fe0a tip
72
72
73 Create local tag with long name:
73 Create local tag with long name:
74
74
75 $ T=`hg identify --debug --id`
75 $ T=`hg identify --debug --id`
76 $ hg tag -l "This is a local tag with a really long name!"
76 $ hg tag -l "This is a local tag with a really long name!"
77 $ hg tags
77 $ hg tags
78 tip 0:acb14030fe0a
78 tip 0:acb14030fe0a
79 This is a local tag with a really long name! 0:acb14030fe0a
79 This is a local tag with a really long name! 0:acb14030fe0a
80 $ rm .hg/localtags
80 $ rm .hg/localtags
81
81
82 Create a tag behind hg's back:
82 Create a tag behind hg's back:
83
83
84 $ echo "$T first" > .hgtags
84 $ echo "$T first" > .hgtags
85 $ cat .hgtags
85 $ cat .hgtags
86 acb14030fe0a21b60322c440ad2d20cf7685a376 first
86 acb14030fe0a21b60322c440ad2d20cf7685a376 first
87 $ hg add .hgtags
87 $ hg add .hgtags
88 $ hg commit -m "add tags"
88 $ hg commit -m "add tags"
89 $ hg tags
89 $ hg tags
90 tip 1:b9154636be93
90 tip 1:b9154636be93
91 first 0:acb14030fe0a
91 first 0:acb14030fe0a
92 $ hg identify
92 $ hg identify
93 b9154636be93 tip
93 b9154636be93 tip
94
94
95 We should have a fnodes cache now that we have a real tag
95 We should have a fnodes cache now that we have a real tag
96 The cache should have an empty entry for rev 0 and a valid entry for rev 1.
96 The cache should have an empty entry for rev 0 and a valid entry for rev 1.
97
97
98
98
99 $ fnodescacheexists
99 $ fnodescacheexists
100 fnodes cache exists
100 fnodes cache exists
101 $ f --size --hexdump .hg/cache/hgtagsfnodes1
101 $ f --size --hexdump .hg/cache/hgtagsfnodes1
102 .hg/cache/hgtagsfnodes1: size=48
102 .hg/cache/hgtagsfnodes1: size=48
103 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
103 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
104 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
104 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
105 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
105 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
106 $ hg debugtagscache
106 $ hg debugtagscache
107 0 acb14030fe0a21b60322c440ad2d20cf7685a376 missing
107 0 acb14030fe0a21b60322c440ad2d20cf7685a376 missing
108 1 b9154636be938d3d431e75a7c906504a079bfe07 26b7b4a773e09ee3c52f510e19e05e1ff966d859
108 1 b9154636be938d3d431e75a7c906504a079bfe07 26b7b4a773e09ee3c52f510e19e05e1ff966d859
109
109
110 Repeat with cold tag cache:
110 Repeat with cold tag cache:
111
111
112 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
112 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
113 $ hg identify
113 $ hg identify
114 b9154636be93 tip
114 b9154636be93 tip
115
115
116 $ fnodescacheexists
116 $ fnodescacheexists
117 fnodes cache exists
117 fnodes cache exists
118 $ f --size --hexdump .hg/cache/hgtagsfnodes1
118 $ f --size --hexdump .hg/cache/hgtagsfnodes1
119 .hg/cache/hgtagsfnodes1: size=48
119 .hg/cache/hgtagsfnodes1: size=48
120 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
120 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
121 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
121 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
122 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
122 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
123
123
124 And again, but now unable to write tag cache or lock file:
124 And again, but now unable to write tag cache or lock file:
125
125
126 #if unix-permissions no-fsmonitor
126 #if unix-permissions no-fsmonitor
127
127
128 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
128 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
129 $ chmod 555 .hg/cache
129 $ chmod 555 .hg/cache
130 $ hg identify
130 $ hg identify
131 b9154636be93 tip
131 b9154636be93 tip
132 $ chmod 755 .hg/cache
132 $ chmod 755 .hg/cache
133
133
134 (this block should be protected by no-fsmonitor, because "chmod 555 .hg"
134 (this block should be protected by no-fsmonitor, because "chmod 555 .hg"
135 makes watchman fail at accessing to files under .hg)
135 makes watchman fail at accessing to files under .hg)
136
136
137 $ chmod 555 .hg
137 $ chmod 555 .hg
138 $ hg identify
138 $ hg identify
139 b9154636be93 tip
139 b9154636be93 tip
140 $ chmod 755 .hg
140 $ chmod 755 .hg
141 #endif
141 #endif
142
142
143 Tag cache debug info written to blackbox log
143 Tag cache debug info written to blackbox log
144
144
145 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
145 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
146 $ hg identify
146 $ hg identify
147 b9154636be93 tip
147 b9154636be93 tip
148 $ hg blackbox -l 6
148 $ hg blackbox -l 6
149 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
149 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
150 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing 48 bytes to cache/hgtagsfnodes1
150 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing 48 bytes to cache/hgtagsfnodes1
151 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
151 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
152 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
152 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
153 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
153 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
154 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
154 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
155
155
156 Failure to acquire lock results in no write
156 Failure to acquire lock results in no write
157
157
158 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
158 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
159 $ echo 'foo:1' > .hg/store/lock
159 $ echo 'foo:1' > .hg/store/lock
160 $ hg identify
160 $ hg identify
161 b9154636be93 tip
161 b9154636be93 tip
162 $ hg blackbox -l 6
162 $ hg blackbox -l 6
163 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
163 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
164 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> not writing .hg/cache/hgtagsfnodes1 because lock cannot be acquired
164 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> not writing .hg/cache/hgtagsfnodes1 because lock cannot be acquired
165 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
165 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
166 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
166 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
167 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
167 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
168 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
168 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
169
169
170 $ fnodescacheexists
170 $ fnodescacheexists
171 no fnodes cache
171 no fnodes cache
172
172
173 $ rm .hg/store/lock
173 $ rm .hg/store/lock
174
174
175 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
175 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
176 $ hg identify
176 $ hg identify
177 b9154636be93 tip
177 b9154636be93 tip
178
178
179 Create a branch:
179 Create a branch:
180
180
181 $ echo bb > a
181 $ echo bb > a
182 $ hg status
182 $ hg status
183 M a
183 M a
184 $ hg identify
184 $ hg identify
185 b9154636be93+ tip
185 b9154636be93+ tip
186 $ hg co first
186 $ hg co first
187 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
187 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
188 $ hg id
188 $ hg id
189 acb14030fe0a+ first
189 acb14030fe0a+ first
190 $ hg id -r 'wdir()'
190 $ hg id -r 'wdir()'
191 acb14030fe0a+ first
191 acb14030fe0a+ first
192 $ hg -v id
192 $ hg -v id
193 acb14030fe0a+ first
193 acb14030fe0a+ first
194 $ hg status
194 $ hg status
195 M a
195 M a
196 $ echo 1 > b
196 $ echo 1 > b
197 $ hg add b
197 $ hg add b
198 $ hg commit -m "branch"
198 $ hg commit -m "branch"
199 created new head
199 created new head
200
200
201 Creating a new commit shouldn't append the .hgtags fnodes cache until
201 Creating a new commit shouldn't append the .hgtags fnodes cache until
202 tags info is accessed
202 tags info is accessed
203
203
204 $ f --size --hexdump .hg/cache/hgtagsfnodes1
204 $ f --size --hexdump .hg/cache/hgtagsfnodes1
205 .hg/cache/hgtagsfnodes1: size=48
205 .hg/cache/hgtagsfnodes1: size=48
206 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
206 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
207 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
207 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
208 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
208 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
209
209
210 $ hg id
210 $ hg id
211 c8edf04160c7 tip
211 c8edf04160c7 tip
212
212
213 First 4 bytes of record 3 are changeset fragment
213 First 4 bytes of record 3 are changeset fragment
214
214
215 $ f --size --hexdump .hg/cache/hgtagsfnodes1
215 $ f --size --hexdump .hg/cache/hgtagsfnodes1
216 .hg/cache/hgtagsfnodes1: size=72
216 .hg/cache/hgtagsfnodes1: size=72
217 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
217 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
218 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
218 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
219 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
219 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
220 0030: c8 ed f0 41 00 00 00 00 00 00 00 00 00 00 00 00 |...A............|
220 0030: c8 ed f0 41 00 00 00 00 00 00 00 00 00 00 00 00 |...A............|
221 0040: 00 00 00 00 00 00 00 00 |........|
221 0040: 00 00 00 00 00 00 00 00 |........|
222
222
223 Merge the two heads:
223 Merge the two heads:
224
224
225 $ hg merge 1
225 $ hg merge 1
226 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
226 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
227 (branch merge, don't forget to commit)
227 (branch merge, don't forget to commit)
228 $ hg blackbox -l3
228 $ hg blackbox -l3
229 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28 (5000)> merge 1
229 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28 (5000)> merge 1
230 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> merge 1 exited 0 after * seconds (glob)
230 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> merge 1 exited 0 after * seconds (glob)
231 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l3
231 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l3
232 $ hg id
232 $ hg id
233 c8edf04160c7+b9154636be93+ tip
233 c8edf04160c7+b9154636be93+ tip
234 $ hg status
234 $ hg status
235 M .hgtags
235 M .hgtags
236 $ hg commit -m "merge"
236 $ hg commit -m "merge"
237
237
238 Create a fake head, make sure tag not visible afterwards:
238 Create a fake head, make sure tag not visible afterwards:
239
239
240 $ cp .hgtags tags
240 $ cp .hgtags tags
241 $ hg tag last
241 $ hg tag last
242 $ hg rm .hgtags
242 $ hg rm .hgtags
243 $ hg commit -m "remove"
243 $ hg commit -m "remove"
244
244
245 $ mv tags .hgtags
245 $ mv tags .hgtags
246 $ hg add .hgtags
246 $ hg add .hgtags
247 $ hg commit -m "readd"
247 $ hg commit -m "readd"
248 $
248 $
249 $ hg tags
249 $ hg tags
250 tip 6:35ff301afafe
250 tip 6:35ff301afafe
251 first 0:acb14030fe0a
251 first 0:acb14030fe0a
252
252
253 Add invalid tags:
253 Add invalid tags:
254
254
255 $ echo "spam" >> .hgtags
255 $ echo "spam" >> .hgtags
256 $ echo >> .hgtags
256 $ echo >> .hgtags
257 $ echo "foo bar" >> .hgtags
257 $ echo "foo bar" >> .hgtags
258 $ echo "a5a5 invalid" >> .hg/localtags
258 $ echo "a5a5 invalid" >> .hg/localtags
259 $ cat .hgtags
259 $ cat .hgtags
260 acb14030fe0a21b60322c440ad2d20cf7685a376 first
260 acb14030fe0a21b60322c440ad2d20cf7685a376 first
261 spam
261 spam
262
262
263 foo bar
263 foo bar
264 $ hg commit -m "tags"
264 $ hg commit -m "tags"
265
265
266 Report tag parse error on other head:
266 Report tag parse error on other head:
267
267
268 $ hg up 3
268 $ hg up 3
269 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
269 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
270 $ echo 'x y' >> .hgtags
270 $ echo 'x y' >> .hgtags
271 $ hg commit -m "head"
271 $ hg commit -m "head"
272 created new head
272 created new head
273
273
274 $ hg tags --debug
274 $ hg tags --debug
275 .hgtags@75d9f02dfe28, line 2: cannot parse entry
275 .hgtags@75d9f02dfe28, line 2: cannot parse entry
276 .hgtags@75d9f02dfe28, line 4: node 'foo' is not well formed
276 .hgtags@75d9f02dfe28, line 4: node 'foo' is not well formed
277 .hgtags@c4be69a18c11, line 2: node 'x' is not well formed
277 .hgtags@c4be69a18c11, line 2: node 'x' is not well formed
278 tip 8:c4be69a18c11e8bc3a5fdbb576017c25f7d84663
278 tip 8:c4be69a18c11e8bc3a5fdbb576017c25f7d84663
279 first 0:acb14030fe0a21b60322c440ad2d20cf7685a376
279 first 0:acb14030fe0a21b60322c440ad2d20cf7685a376
280 $ hg tip
280 $ hg tip
281 changeset: 8:c4be69a18c11
281 changeset: 8:c4be69a18c11
282 tag: tip
282 tag: tip
283 parent: 3:ac5e980c4dc0
283 parent: 3:ac5e980c4dc0
284 user: test
284 user: test
285 date: Thu Jan 01 00:00:00 1970 +0000
285 date: Thu Jan 01 00:00:00 1970 +0000
286 summary: head
286 summary: head
287
287
288
288
289 Test tag precedence rules:
289 Test tag precedence rules:
290
290
291 $ cd ..
291 $ cd ..
292 $ hg init t2
292 $ hg init t2
293 $ cd t2
293 $ cd t2
294 $ echo foo > foo
294 $ echo foo > foo
295 $ hg add foo
295 $ hg add foo
296 $ hg ci -m 'add foo' # rev 0
296 $ hg ci -m 'add foo' # rev 0
297 $ hg tag bar # rev 1
297 $ hg tag bar # rev 1
298 $ echo >> foo
298 $ echo >> foo
299 $ hg ci -m 'change foo 1' # rev 2
299 $ hg ci -m 'change foo 1' # rev 2
300 $ hg up -C 1
300 $ hg up -C 1
301 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
301 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
302 $ hg tag -r 1 -f bar # rev 3
302 $ hg tag -r 1 -f bar # rev 3
303 $ hg up -C 1
303 $ hg up -C 1
304 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
304 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
305 $ echo >> foo
305 $ echo >> foo
306 $ hg ci -m 'change foo 2' # rev 4
306 $ hg ci -m 'change foo 2' # rev 4
307 created new head
307 created new head
308 $ hg tags
308 $ hg tags
309 tip 4:0c192d7d5e6b
309 tip 4:0c192d7d5e6b
310 bar 1:78391a272241
310 bar 1:78391a272241
311
311
312 Repeat in case of cache effects:
312 Repeat in case of cache effects:
313
313
314 $ hg tags
314 $ hg tags
315 tip 4:0c192d7d5e6b
315 tip 4:0c192d7d5e6b
316 bar 1:78391a272241
316 bar 1:78391a272241
317
317
318 Detailed dump of tag info:
318 Detailed dump of tag info:
319
319
320 $ hg heads -q # expect 4, 3, 2
320 $ hg heads -q # expect 4, 3, 2
321 4:0c192d7d5e6b
321 4:0c192d7d5e6b
322 3:6fa450212aeb
322 3:6fa450212aeb
323 2:7a94127795a3
323 2:7a94127795a3
324 $ dumptags 2
324 $ dumptags 2
325 rev 2: .hgtags:
325 rev 2: .hgtags:
326 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
326 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
327 $ dumptags 3
327 $ dumptags 3
328 rev 3: .hgtags:
328 rev 3: .hgtags:
329 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
329 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
330 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
330 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
331 78391a272241d70354aa14c874552cad6b51bb42 bar
331 78391a272241d70354aa14c874552cad6b51bb42 bar
332 $ dumptags 4
332 $ dumptags 4
333 rev 4: .hgtags:
333 rev 4: .hgtags:
334 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
334 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
335
335
336 Dump cache:
336 Dump cache:
337
337
338 $ cat .hg/cache/tags2-visible
338 $ cat .hg/cache/tags2-visible
339 4 0c192d7d5e6b78a714de54a2e9627952a877e25a
339 4 0c192d7d5e6b78a714de54a2e9627952a877e25a
340 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
340 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
341 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
341 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
342 78391a272241d70354aa14c874552cad6b51bb42 bar
342 78391a272241d70354aa14c874552cad6b51bb42 bar
343
343
344 $ f --size --hexdump .hg/cache/hgtagsfnodes1
344 $ f --size --hexdump .hg/cache/hgtagsfnodes1
345 .hg/cache/hgtagsfnodes1: size=120
345 .hg/cache/hgtagsfnodes1: size=120
346 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
346 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
347 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
347 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
348 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
348 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
349 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
349 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
350 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
350 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
351 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
351 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
352 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
352 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
353 0070: 78 ee 5a 2d ad bc 94 3d |x.Z-...=|
353 0070: 78 ee 5a 2d ad bc 94 3d |x.Z-...=|
354
354
355 Corrupt the .hgtags fnodes cache
355 Corrupt the .hgtags fnodes cache
356 Extra junk data at the end should get overwritten on next cache update
356 Extra junk data at the end should get overwritten on next cache update
357
357
358 $ echo extra >> .hg/cache/hgtagsfnodes1
358 $ echo extra >> .hg/cache/hgtagsfnodes1
359 $ echo dummy1 > foo
359 $ echo dummy1 > foo
360 $ hg commit -m throwaway1
360 $ hg commit -m throwaway1
361
361
362 $ hg tags
362 $ hg tags
363 tip 5:8dbfe60eff30
363 tip 5:8dbfe60eff30
364 bar 1:78391a272241
364 bar 1:78391a272241
365
365
366 $ hg blackbox -l 6
366 $ hg blackbox -l 6
367 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags
367 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags
368 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing 24 bytes to cache/hgtagsfnodes1
368 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing 24 bytes to cache/hgtagsfnodes1
369 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> 3/4 cache hits/lookups in * seconds (glob)
369 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> 3/4 cache hits/lookups in * seconds (glob)
370 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing .hg/cache/tags2-visible with 1 tags
370 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing .hg/cache/tags2-visible with 1 tags
371 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob)
371 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob)
372 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6
372 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6
373
373
374 On junk data + missing cache entries, hg also overwrites the junk.
374 On junk data + missing cache entries, hg also overwrites the junk.
375
375
376 $ rm -f .hg/cache/tags2-visible
376 $ rm -f .hg/cache/tags2-visible
377 >>> import os
377 >>> import os
378 >>> with open(".hg/cache/hgtagsfnodes1", "ab+") as fp:
378 >>> with open(".hg/cache/hgtagsfnodes1", "ab+") as fp:
379 ... fp.seek(-10, os.SEEK_END) and None
379 ... fp.seek(-10, os.SEEK_END) and None
380 ... fp.truncate() and None
380 ... fp.truncate() and None
381
381
382 $ hg debugtagscache | tail -2
382 $ hg debugtagscache | tail -2
383 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
383 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
384 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 missing
384 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 missing
385 $ hg tags
385 $ hg tags
386 tip 5:8dbfe60eff30
386 tip 5:8dbfe60eff30
387 bar 1:78391a272241
387 bar 1:78391a272241
388 $ hg debugtagscache | tail -2
388 $ hg debugtagscache | tail -2
389 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
389 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
390 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8af31de17fab7422878ee5a2dadbc943d
390 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8af31de17fab7422878ee5a2dadbc943d
391
391
392 If the 4 bytes of node hash for a record don't match an existing node, the entry
392 If the 4 bytes of node hash for a record don't match an existing node, the entry
393 is flagged as invalid.
393 is flagged as invalid.
394
394
395 >>> import os
395 >>> import os
396 >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
396 >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
397 ... fp.seek(-24, os.SEEK_END) and None
397 ... fp.seek(-24, os.SEEK_END) and None
398 ... fp.write(b'\xde\xad') and None
398 ... fp.write(b'\xde\xad') and None
399
399
400 $ f --size --hexdump .hg/cache/hgtagsfnodes1
400 $ f --size --hexdump .hg/cache/hgtagsfnodes1
401 .hg/cache/hgtagsfnodes1: size=144
401 .hg/cache/hgtagsfnodes1: size=144
402 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
402 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
403 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
403 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
404 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
404 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
405 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
405 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
406 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
406 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
407 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
407 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
408 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
408 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
409 0070: 78 ee 5a 2d ad bc 94 3d de ad e6 0e 0c 04 f2 a8 |x.Z-...=........|
409 0070: 78 ee 5a 2d ad bc 94 3d de ad e6 0e 0c 04 f2 a8 |x.Z-...=........|
410 0080: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
410 0080: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
411
411
412 $ hg debugtagscache | tail -2
412 $ hg debugtagscache | tail -2
413 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
413 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
414 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 invalid
414 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 invalid
415
415
416 $ hg tags
416 $ hg tags
417 tip 5:8dbfe60eff30
417 tip 5:8dbfe60eff30
418 bar 1:78391a272241
418 bar 1:78391a272241
419
419
420 BUG: If the filenode part of an entry in hgtagsfnodes is corrupt and
420 BUG: If the filenode part of an entry in hgtagsfnodes is corrupt and
421 tags2-visible is missing, `hg tags` aborts. Corrupting the leading 4 bytes of
421 tags2-visible is missing, `hg tags` aborts. Corrupting the leading 4 bytes of
422 node hash (as above) doesn't seem to trigger the issue. Also note that the
422 node hash (as above) doesn't seem to trigger the issue. Also note that the
423 debug command hides the corruption, both with and without tags2-visible.
423 debug command hides the corruption, both with and without tags2-visible.
424
424
425 $ mv .hg/cache/hgtagsfnodes1 .hg/cache/hgtagsfnodes1.bak
425 $ mv .hg/cache/hgtagsfnodes1 .hg/cache/hgtagsfnodes1.bak
426 $ hg debugupdatecaches
426 $ hg debugupdatecaches
427
427
428 >>> import os
428 >>> import os
429 >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
429 >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
430 ... fp.seek(-16, os.SEEK_END) and None
430 ... fp.seek(-16, os.SEEK_END) and None
431 ... fp.write(b'\xde\xad') and None
431 ... fp.write(b'\xde\xad') and None
432
432
433 $ f --size --hexdump .hg/cache/hgtagsfnodes1
433 $ f --size --hexdump .hg/cache/hgtagsfnodes1
434 .hg/cache/hgtagsfnodes1: size=144
434 .hg/cache/hgtagsfnodes1: size=144
435 0000: bb d1 79 df 00 00 00 00 00 00 00 00 00 00 00 00 |..y.............|
435 0000: bb d1 79 df 00 00 00 00 00 00 00 00 00 00 00 00 |..y.............|
436 0010: 00 00 00 00 00 00 00 00 78 39 1a 27 0c 04 f2 a8 |........x9.'....|
436 0010: 00 00 00 00 00 00 00 00 78 39 1a 27 0c 04 f2 a8 |........x9.'....|
437 0020: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
437 0020: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
438 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
438 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
439 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
439 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
440 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
440 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
441 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
441 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
442 0070: 78 ee 5a 2d ad bc 94 3d 8d bf e6 0e 0c 04 f2 a8 |x.Z-...=........|
442 0070: 78 ee 5a 2d ad bc 94 3d 8d bf e6 0e 0c 04 f2 a8 |x.Z-...=........|
443 0080: de ad de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |......B(x.Z-...=|
443 0080: de ad de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |......B(x.Z-...=|
444
444
445 $ hg debugtagscache | tail -2
445 $ hg debugtagscache | tail -2
446 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
446 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
447 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
447 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
448
448
449 $ rm -f .hg/cache/tags2-visible
449 $ rm -f .hg/cache/tags2-visible
450 $ hg debugtagscache | tail -2
450 $ hg debugtagscache | tail -2
451 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
451 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
452 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
452 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
453
453
454 $ hg tags
454 $ hg tags
455 tip 5:8dbfe60eff30
455 tip 5:8dbfe60eff30
456 bar 1:78391a272241
456 bar 1:78391a272241
457
457
458 BUG: Unless this file is restored, the `hg tags` in the next unix-permissions
458 BUG: Unless this file is restored, the `hg tags` in the next unix-permissions
459 conditional will fail: "abort: data/.hgtags.i@0c04f2a8dead: no match found"
459 conditional will fail: "abort: data/.hgtags.i@0c04f2a8dead: no match found"
460
460
461 $ mv .hg/cache/hgtagsfnodes1.bak .hg/cache/hgtagsfnodes1
461 $ mv .hg/cache/hgtagsfnodes1.bak .hg/cache/hgtagsfnodes1
462
462
463 #if unix-permissions no-root
463 #if unix-permissions no-root
464 Errors writing to .hgtags fnodes cache are silently ignored
464 Errors writing to .hgtags fnodes cache are silently ignored
465
465
466 $ echo dummy2 > foo
466 $ echo dummy2 > foo
467 $ hg commit -m throwaway2
467 $ hg commit -m throwaway2
468
468
469 $ chmod a-w .hg/cache/hgtagsfnodes1
469 $ chmod a-w .hg/cache/hgtagsfnodes1
470 $ rm -f .hg/cache/tags2-visible
470 $ rm -f .hg/cache/tags2-visible
471
471
472 $ hg tags
472 $ hg tags
473 tip 6:b968051b5cf3
473 tip 6:b968051b5cf3
474 bar 1:78391a272241
474 bar 1:78391a272241
475
475
476 $ hg blackbox -l 6
476 $ hg blackbox -l 6
477 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
477 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
478 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno *] * (glob)
478 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno *] * (glob)
479 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
479 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
480 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
480 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
481 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
481 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
482 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
482 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
483
483
484 $ chmod a+w .hg/cache/hgtagsfnodes1
484 $ chmod a+w .hg/cache/hgtagsfnodes1
485
485
486 $ rm -f .hg/cache/tags2-visible
486 $ rm -f .hg/cache/tags2-visible
487 $ hg tags
487 $ hg tags
488 tip 6:b968051b5cf3
488 tip 6:b968051b5cf3
489 bar 1:78391a272241
489 bar 1:78391a272241
490
490
491 $ hg blackbox -l 6
491 $ hg blackbox -l 6
492 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
492 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
493 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1
493 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1
494 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
494 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
495 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
495 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
496 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
496 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
497 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
497 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
498
498
499 $ f --size .hg/cache/hgtagsfnodes1
499 $ f --size .hg/cache/hgtagsfnodes1
500 .hg/cache/hgtagsfnodes1: size=168
500 .hg/cache/hgtagsfnodes1: size=168
501
501
502 $ hg -q --config extensions.strip= strip -r 6 --no-backup
502 $ hg -q --config extensions.strip= strip -r 6 --no-backup
503 #endif
503 #endif
504
504
505 Stripping doesn't truncate the tags cache until new data is available
505 Stripping doesn't truncate the tags cache until new data is available
506
506
507 $ rm -f .hg/cache/hgtagsfnodes1 .hg/cache/tags2-visible
507 $ rm -f .hg/cache/hgtagsfnodes1 .hg/cache/tags2-visible
508 $ hg tags
508 $ hg tags
509 tip 5:8dbfe60eff30
509 tip 5:8dbfe60eff30
510 bar 1:78391a272241
510 bar 1:78391a272241
511
511
512 $ f --size .hg/cache/hgtagsfnodes1
512 $ f --size .hg/cache/hgtagsfnodes1
513 .hg/cache/hgtagsfnodes1: size=144
513 .hg/cache/hgtagsfnodes1: size=144
514
514
515 $ hg -q --config extensions.strip= strip -r 5 --no-backup
515 $ hg -q --config extensions.strip= strip -r 5 --no-backup
516 $ hg tags
516 $ hg tags
517 tip 4:0c192d7d5e6b
517 tip 4:0c192d7d5e6b
518 bar 1:78391a272241
518 bar 1:78391a272241
519
519
520 $ hg blackbox -l 5
520 $ hg blackbox -l 5
521 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing 24 bytes to cache/hgtagsfnodes1
521 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing 24 bytes to cache/hgtagsfnodes1
522 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> 2/4 cache hits/lookups in * seconds (glob)
522 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> 2/4 cache hits/lookups in * seconds (glob)
523 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing .hg/cache/tags2-visible with 1 tags
523 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing .hg/cache/tags2-visible with 1 tags
524 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> tags exited 0 after * seconds (glob)
524 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> tags exited 0 after * seconds (glob)
525 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> blackbox -l 5
525 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> blackbox -l 5
526
526
527 $ f --size .hg/cache/hgtagsfnodes1
527 $ f --size .hg/cache/hgtagsfnodes1
528 .hg/cache/hgtagsfnodes1: size=120
528 .hg/cache/hgtagsfnodes1: size=120
529
529
530 $ echo dummy > foo
530 $ echo dummy > foo
531 $ hg commit -m throwaway3
531 $ hg commit -m throwaway3
532
532
533 $ hg tags
533 $ hg tags
534 tip 5:035f65efb448
534 tip 5:035f65efb448
535 bar 1:78391a272241
535 bar 1:78391a272241
536
536
537 $ hg blackbox -l 6
537 $ hg blackbox -l 6
538 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags
538 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags
539 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing 24 bytes to cache/hgtagsfnodes1
539 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing 24 bytes to cache/hgtagsfnodes1
540 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> 3/4 cache hits/lookups in * seconds (glob)
540 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> 3/4 cache hits/lookups in * seconds (glob)
541 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing .hg/cache/tags2-visible with 1 tags
541 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing .hg/cache/tags2-visible with 1 tags
542 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags exited 0 after * seconds (glob)
542 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags exited 0 after * seconds (glob)
543 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> blackbox -l 6
543 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> blackbox -l 6
544 $ f --size .hg/cache/hgtagsfnodes1
544 $ f --size .hg/cache/hgtagsfnodes1
545 .hg/cache/hgtagsfnodes1: size=144
545 .hg/cache/hgtagsfnodes1: size=144
546
546
547 $ hg -q --config extensions.strip= strip -r 5 --no-backup
547 $ hg -q --config extensions.strip= strip -r 5 --no-backup
548
548
549 Test tag removal:
549 Test tag removal:
550
550
551 $ hg tag --remove bar # rev 5
551 $ hg tag --remove bar # rev 5
552 $ hg tip -vp
552 $ hg tip -vp
553 changeset: 5:5f6e8655b1c7
553 changeset: 5:5f6e8655b1c7
554 tag: tip
554 tag: tip
555 user: test
555 user: test
556 date: Thu Jan 01 00:00:00 1970 +0000
556 date: Thu Jan 01 00:00:00 1970 +0000
557 files: .hgtags
557 files: .hgtags
558 description:
558 description:
559 Removed tag bar
559 Removed tag bar
560
560
561
561
562 diff -r 0c192d7d5e6b -r 5f6e8655b1c7 .hgtags
562 diff -r 0c192d7d5e6b -r 5f6e8655b1c7 .hgtags
563 --- a/.hgtags Thu Jan 01 00:00:00 1970 +0000
563 --- a/.hgtags Thu Jan 01 00:00:00 1970 +0000
564 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
564 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
565 @@ -1,1 +1,3 @@
565 @@ -1,1 +1,3 @@
566 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
566 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
567 +78391a272241d70354aa14c874552cad6b51bb42 bar
567 +78391a272241d70354aa14c874552cad6b51bb42 bar
568 +0000000000000000000000000000000000000000 bar
568 +0000000000000000000000000000000000000000 bar
569
569
570 $ hg tags
570 $ hg tags
571 tip 5:5f6e8655b1c7
571 tip 5:5f6e8655b1c7
572 $ hg tags # again, try to expose cache bugs
572 $ hg tags # again, try to expose cache bugs
573 tip 5:5f6e8655b1c7
573 tip 5:5f6e8655b1c7
574
574
575 Remove nonexistent tag:
575 Remove nonexistent tag:
576
576
577 $ hg tag --remove foobar
577 $ hg tag --remove foobar
578 abort: tag 'foobar' does not exist
578 abort: tag 'foobar' does not exist
579 [10]
579 [10]
580 $ hg tip
580 $ hg tip
581 changeset: 5:5f6e8655b1c7
581 changeset: 5:5f6e8655b1c7
582 tag: tip
582 tag: tip
583 user: test
583 user: test
584 date: Thu Jan 01 00:00:00 1970 +0000
584 date: Thu Jan 01 00:00:00 1970 +0000
585 summary: Removed tag bar
585 summary: Removed tag bar
586
586
587
587
588 Undo a tag with rollback:
588 Undo a tag with rollback:
589
589
590 $ hg rollback # destroy rev 5 (restore bar)
590 $ hg rollback # destroy rev 5 (restore bar)
591 repository tip rolled back to revision 4 (undo commit)
591 repository tip rolled back to revision 4 (undo commit)
592 working directory now based on revision 4
592 working directory now based on revision 4
593 $ hg tags
593 $ hg tags
594 tip 4:0c192d7d5e6b
594 tip 4:0c192d7d5e6b
595 bar 1:78391a272241
595 bar 1:78391a272241
596 $ hg tags
596 $ hg tags
597 tip 4:0c192d7d5e6b
597 tip 4:0c192d7d5e6b
598 bar 1:78391a272241
598 bar 1:78391a272241
599
599
600 Test tag rank:
600 Test tag rank:
601
601
602 $ cd ..
602 $ cd ..
603 $ hg init t3
603 $ hg init t3
604 $ cd t3
604 $ cd t3
605 $ echo foo > foo
605 $ echo foo > foo
606 $ hg add foo
606 $ hg add foo
607 $ hg ci -m 'add foo' # rev 0
607 $ hg ci -m 'add foo' # rev 0
608 $ hg tag -f bar # rev 1 bar -> 0
608 $ hg tag -f bar # rev 1 bar -> 0
609 $ hg tag -f bar # rev 2 bar -> 1
609 $ hg tag -f bar # rev 2 bar -> 1
610 $ hg tag -fr 0 bar # rev 3 bar -> 0
610 $ hg tag -fr 0 bar # rev 3 bar -> 0
611 $ hg tag -fr 1 bar # rev 4 bar -> 1
611 $ hg tag -fr 1 bar # rev 4 bar -> 1
612 $ hg tag -fr 0 bar # rev 5 bar -> 0
612 $ hg tag -fr 0 bar # rev 5 bar -> 0
613 $ hg tags
613 $ hg tags
614 tip 5:85f05169d91d
614 tip 5:85f05169d91d
615 bar 0:bbd179dfa0a7
615 bar 0:bbd179dfa0a7
616 $ hg co 3
616 $ hg co 3
617 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
617 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
618 $ echo barbar > foo
618 $ echo barbar > foo
619 $ hg ci -m 'change foo' # rev 6
619 $ hg ci -m 'change foo' # rev 6
620 created new head
620 created new head
621 $ hg tags
621 $ hg tags
622 tip 6:735c3ca72986
622 tip 6:735c3ca72986
623 bar 0:bbd179dfa0a7
623 bar 0:bbd179dfa0a7
624
624
625 Don't allow moving tag without -f:
625 Don't allow moving tag without -f:
626
626
627 $ hg tag -r 3 bar
627 $ hg tag -r 3 bar
628 abort: tag 'bar' already exists (use -f to force)
628 abort: tag 'bar' already exists (use -f to force)
629 [10]
629 [10]
630 $ hg tags
630 $ hg tags
631 tip 6:735c3ca72986
631 tip 6:735c3ca72986
632 bar 0:bbd179dfa0a7
632 bar 0:bbd179dfa0a7
633
633
634 Strip 1: expose an old head:
634 Strip 1: expose an old head:
635
635
636 $ hg --config extensions.mq= strip 5
636 $ hg --config extensions.mq= strip 5
637 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
637 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
638 $ hg tags # partly stale cache
638 $ hg tags # partly stale cache
639 tip 5:735c3ca72986
639 tip 5:735c3ca72986
640 bar 1:78391a272241
640 bar 1:78391a272241
641 $ hg tags # up-to-date cache
641 $ hg tags # up-to-date cache
642 tip 5:735c3ca72986
642 tip 5:735c3ca72986
643 bar 1:78391a272241
643 bar 1:78391a272241
644
644
645 Strip 2: destroy whole branch, no old head exposed
645 Strip 2: destroy whole branch, no old head exposed
646
646
647 $ hg --config extensions.mq= strip 4
647 $ hg --config extensions.mq= strip 4
648 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
648 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
649 $ hg tags # partly stale
649 $ hg tags # partly stale
650 tip 4:735c3ca72986
650 tip 4:735c3ca72986
651 bar 0:bbd179dfa0a7
651 bar 0:bbd179dfa0a7
652 $ rm -f .hg/cache/tags2-visible
652 $ rm -f .hg/cache/tags2-visible
653 $ hg tags # cold cache
653 $ hg tags # cold cache
654 tip 4:735c3ca72986
654 tip 4:735c3ca72986
655 bar 0:bbd179dfa0a7
655 bar 0:bbd179dfa0a7
656
656
657 Test tag rank with 3 heads:
657 Test tag rank with 3 heads:
658
658
659 $ cd ..
659 $ cd ..
660 $ hg init t4
660 $ hg init t4
661 $ cd t4
661 $ cd t4
662 $ echo foo > foo
662 $ echo foo > foo
663 $ hg add
663 $ hg add
664 adding foo
664 adding foo
665 $ hg ci -m 'add foo' # rev 0
665 $ hg ci -m 'add foo' # rev 0
666 $ hg tag bar # rev 1 bar -> 0
666 $ hg tag bar # rev 1 bar -> 0
667 $ hg tag -f bar # rev 2 bar -> 1
667 $ hg tag -f bar # rev 2 bar -> 1
668 $ hg up -qC 0
668 $ hg up -qC 0
669 $ hg tag -fr 2 bar # rev 3 bar -> 2
669 $ hg tag -fr 2 bar # rev 3 bar -> 2
670 $ hg tags
670 $ hg tags
671 tip 3:197c21bbbf2c
671 tip 3:197c21bbbf2c
672 bar 2:6fa450212aeb
672 bar 2:6fa450212aeb
673 $ hg up -qC 0
673 $ hg up -qC 0
674 $ hg tag -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
674 $ hg tag -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
675
675
676 Bar should still point to rev 2:
676 Bar should still point to rev 2:
677
677
678 $ hg tags
678 $ hg tags
679 tip 4:3b4b14ed0202
679 tip 4:3b4b14ed0202
680 bar 2:6fa450212aeb
680 bar 2:6fa450212aeb
681
681
682 Test that removing global/local tags does not get confused when trying
682 Test that removing global/local tags does not get confused when trying
683 to remove a tag of type X which actually only exists as a type Y:
683 to remove a tag of type X which actually only exists as a type Y:
684
684
685 $ cd ..
685 $ cd ..
686 $ hg init t5
686 $ hg init t5
687 $ cd t5
687 $ cd t5
688 $ echo foo > foo
688 $ echo foo > foo
689 $ hg add
689 $ hg add
690 adding foo
690 adding foo
691 $ hg ci -m 'add foo' # rev 0
691 $ hg ci -m 'add foo' # rev 0
692
692
693 $ hg tag -r 0 -l localtag
693 $ hg tag -r 0 -l localtag
694 $ hg tag --remove localtag
694 $ hg tag --remove localtag
695 abort: tag 'localtag' is not a global tag
695 abort: tag 'localtag' is not a global tag
696 [10]
696 [10]
697 $
697 $
698 $ hg tag -r 0 globaltag
698 $ hg tag -r 0 globaltag
699 $ hg tag --remove -l globaltag
699 $ hg tag --remove -l globaltag
700 abort: tag 'globaltag' is not a local tag
700 abort: tag 'globaltag' is not a local tag
701 [10]
701 [10]
702 $ hg tags -v
702 $ hg tags -v
703 tip 1:a0b6fe111088
703 tip 1:a0b6fe111088
704 localtag 0:bbd179dfa0a7 local
704 localtag 0:bbd179dfa0a7 local
705 globaltag 0:bbd179dfa0a7
705 globaltag 0:bbd179dfa0a7
706
706
707 Templated output:
707 Templated output:
708
708
709 (immediate values)
709 (immediate values)
710
710
711 $ hg tags -T '{pad(tag, 9)} {rev}:{node} ({type})\n'
711 $ hg tags -T '{pad(tag, 9)} {rev}:{node} ({type})\n'
712 tip 1:a0b6fe111088c8c29567d3876cc466aa02927cae ()
712 tip 1:a0b6fe111088c8c29567d3876cc466aa02927cae ()
713 localtag 0:bbd179dfa0a71671c253b3ae0aa1513b60d199fa (local)
713 localtag 0:bbd179dfa0a71671c253b3ae0aa1513b60d199fa (local)
714 globaltag 0:bbd179dfa0a71671c253b3ae0aa1513b60d199fa ()
714 globaltag 0:bbd179dfa0a71671c253b3ae0aa1513b60d199fa ()
715
715
716 (ctx/revcache dependent)
716 (ctx/revcache dependent)
717
717
718 $ hg tags -T '{pad(tag, 9)} {rev} {file_adds}\n'
718 $ hg tags -T '{pad(tag, 9)} {rev} {file_adds}\n'
719 tip 1 .hgtags
719 tip 1 .hgtags
720 localtag 0 foo
720 localtag 0 foo
721 globaltag 0 foo
721 globaltag 0 foo
722
722
723 $ hg tags -T '{pad(tag, 9)} {rev}:{node|shortest}\n'
723 $ hg tags -T '{pad(tag, 9)} {rev}:{node|shortest}\n'
724 tip 1:a0b6
724 tip 1:a0b6
725 localtag 0:bbd1
725 localtag 0:bbd1
726 globaltag 0:bbd1
726 globaltag 0:bbd1
727
727
728 Test for issue3911
728 Test for issue3911
729
729
730 $ hg tag -r 0 -l localtag2
730 $ hg tag -r 0 -l localtag2
731 $ hg tag -l --remove localtag2
731 $ hg tag -l --remove localtag2
732 $ hg tags -v
732 $ hg tags -v
733 tip 1:a0b6fe111088
733 tip 1:a0b6fe111088
734 localtag 0:bbd179dfa0a7 local
734 localtag 0:bbd179dfa0a7 local
735 globaltag 0:bbd179dfa0a7
735 globaltag 0:bbd179dfa0a7
736
736
737 $ hg tag -r 1 -f localtag
737 $ hg tag -r 1 -f localtag
738 $ hg tags -v
738 $ hg tags -v
739 tip 2:5c70a037bb37
739 tip 2:5c70a037bb37
740 localtag 1:a0b6fe111088
740 localtag 1:a0b6fe111088
741 globaltag 0:bbd179dfa0a7
741 globaltag 0:bbd179dfa0a7
742
742
743 $ hg tags -v
743 $ hg tags -v
744 tip 2:5c70a037bb37
744 tip 2:5c70a037bb37
745 localtag 1:a0b6fe111088
745 localtag 1:a0b6fe111088
746 globaltag 0:bbd179dfa0a7
746 globaltag 0:bbd179dfa0a7
747
747
748 $ hg tag -r 1 localtag2
748 $ hg tag -r 1 localtag2
749 $ hg tags -v
749 $ hg tags -v
750 tip 3:bbfb8cd42be2
750 tip 3:bbfb8cd42be2
751 localtag2 1:a0b6fe111088
751 localtag2 1:a0b6fe111088
752 localtag 1:a0b6fe111088
752 localtag 1:a0b6fe111088
753 globaltag 0:bbd179dfa0a7
753 globaltag 0:bbd179dfa0a7
754
754
755 $ hg tags -v
755 $ hg tags -v
756 tip 3:bbfb8cd42be2
756 tip 3:bbfb8cd42be2
757 localtag2 1:a0b6fe111088
757 localtag2 1:a0b6fe111088
758 localtag 1:a0b6fe111088
758 localtag 1:a0b6fe111088
759 globaltag 0:bbd179dfa0a7
759 globaltag 0:bbd179dfa0a7
760
760
761 $ cd ..
761 $ cd ..
762
762
763 Create a repository with tags data to test .hgtags fnodes transfer
763 Create a repository with tags data to test .hgtags fnodes transfer
764
764
765 $ hg init tagsserver
765 $ hg init tagsserver
766 $ cd tagsserver
766 $ cd tagsserver
767 $ touch foo
767 $ touch foo
768 $ hg -q commit -A -m initial
768 $ hg -q commit -A -m initial
769 $ hg tag -m 'tag 0.1' 0.1
769 $ hg tag -m 'tag 0.1' 0.1
770 $ echo second > foo
770 $ echo second > foo
771 $ hg commit -m second
771 $ hg commit -m second
772 $ hg tag -m 'tag 0.2' 0.2
772 $ hg tag -m 'tag 0.2' 0.2
773 $ hg tags
773 $ hg tags
774 tip 3:40f0358cb314
774 tip 3:40f0358cb314
775 0.2 2:f63cc8fe54e4
775 0.2 2:f63cc8fe54e4
776 0.1 0:96ee1d7354c4
776 0.1 0:96ee1d7354c4
777 $ cd ..
777 $ cd ..
778
778
779 Cloning should pull down hgtags fnodes mappings and write the cache file
779 Cloning should pull down hgtags fnodes mappings and write the cache file
780
780
781 $ hg clone --pull tagsserver tagsclient
781 $ hg clone --pull tagsserver tagsclient
782 requesting all changes
782 requesting all changes
783 adding changesets
783 adding changesets
784 adding manifests
784 adding manifests
785 adding file changes
785 adding file changes
786 added 4 changesets with 4 changes to 2 files
786 added 4 changesets with 4 changes to 2 files
787 new changesets 96ee1d7354c4:40f0358cb314
787 new changesets 96ee1d7354c4:40f0358cb314
788 updating to branch default
788 updating to branch default
789 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
789 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
790
790
791 Missing tags2* files means the cache wasn't written through the normal mechanism.
791 Missing tags2* files means the cache wasn't written through the normal mechanism.
792
792
793 $ ls tagsclient/.hg/cache
793 $ ls tagsclient/.hg/cache
794 branch2-base
794 branch2-base
795 branch2-immutable
795 branch2-immutable
796 branch2-served
796 branch2-served
797 branch2-served.hidden
797 branch2-served.hidden
798 branch2-visible
798 branch2-visible
799 branch2-visible-hidden
799 branch2-visible-hidden
800 hgtagsfnodes1
800 hgtagsfnodes1
801 rbc-names-v1
801 rbc-names-v1
802 rbc-revs-v1
802 rbc-revs-v1
803 tags2
803 tags2
804 tags2-served
804 tags2-served
805
805
806 Cache should contain the head only, even though other nodes have tags data
806 Cache should contain the head only, even though other nodes have tags data
807
807
808 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
808 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
809 tagsclient/.hg/cache/hgtagsfnodes1: size=96
809 tagsclient/.hg/cache/hgtagsfnodes1: size=96
810 0000: 96 ee 1d 73 00 00 00 00 00 00 00 00 00 00 00 00 |...s............|
810 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
811 0010: 00 00 00 00 00 00 00 00 c4 da b0 c2 94 65 e1 c6 |.............e..|
811 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
812 0020: 0d f7 f0 dd 32 04 ea 57 78 c8 97 97 79 fc d5 95 |....2..Wx...y...|
812 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
813 0030: f6 3c c8 fe 94 65 e1 c6 0d f7 f0 dd 32 04 ea 57 |.<...e......2..W|
813 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
814 0040: 78 c8 97 97 79 fc d5 95 40 f0 35 8c 19 e0 a7 d3 |x...y...@.5.....|
814 0040: ff ff ff ff ff ff ff ff 40 f0 35 8c 19 e0 a7 d3 |........@.5.....|
815 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
815 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
816
816
817 Running hg tags should produce tags2* file and not change cache
817 Running hg tags should produce tags2* file and not change cache
818
818
819 $ hg -R tagsclient tags
819 $ hg -R tagsclient tags
820 tip 3:40f0358cb314
820 tip 3:40f0358cb314
821 0.2 2:f63cc8fe54e4
821 0.2 2:f63cc8fe54e4
822 0.1 0:96ee1d7354c4
822 0.1 0:96ee1d7354c4
823
823
824 $ ls tagsclient/.hg/cache
824 $ ls tagsclient/.hg/cache
825 branch2-base
825 branch2-base
826 branch2-immutable
826 branch2-immutable
827 branch2-served
827 branch2-served
828 branch2-served.hidden
828 branch2-served.hidden
829 branch2-visible
829 branch2-visible
830 branch2-visible-hidden
830 branch2-visible-hidden
831 hgtagsfnodes1
831 hgtagsfnodes1
832 rbc-names-v1
832 rbc-names-v1
833 rbc-revs-v1
833 rbc-revs-v1
834 tags2
834 tags2
835 tags2-served
835 tags2-served
836 tags2-visible
836 tags2-visible
837
837
838 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
838 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
839 tagsclient/.hg/cache/hgtagsfnodes1: size=96
839 tagsclient/.hg/cache/hgtagsfnodes1: size=96
840 0000: 96 ee 1d 73 00 00 00 00 00 00 00 00 00 00 00 00 |...s............|
840 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
841 0010: 00 00 00 00 00 00 00 00 c4 da b0 c2 94 65 e1 c6 |.............e..|
841 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
842 0020: 0d f7 f0 dd 32 04 ea 57 78 c8 97 97 79 fc d5 95 |....2..Wx...y...|
842 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
843 0030: f6 3c c8 fe 94 65 e1 c6 0d f7 f0 dd 32 04 ea 57 |.<...e......2..W|
843 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
844 0040: 78 c8 97 97 79 fc d5 95 40 f0 35 8c 19 e0 a7 d3 |x...y...@.5.....|
844 0040: ff ff ff ff ff ff ff ff 40 f0 35 8c 19 e0 a7 d3 |........@.5.....|
845 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
845 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
846
846
847 Check that the bundle includes cache data
847 Check that the bundle includes cache data
848
848
849 $ hg -R tagsclient bundle --all ./test-cache-in-bundle-all-rev.hg
849 $ hg -R tagsclient bundle --all ./test-cache-in-bundle-all-rev.hg
850 4 changesets found
850 4 changesets found
851 $ hg debugbundle ./test-cache-in-bundle-all-rev.hg
851 $ hg debugbundle ./test-cache-in-bundle-all-rev.hg
852 Stream params: {Compression: BZ}
852 Stream params: {Compression: BZ}
853 changegroup -- {nbchanges: 4, version: 02} (mandatory: True)
853 changegroup -- {nbchanges: 4, version: 02} (mandatory: True)
854 96ee1d7354c4ad7372047672c36a1f561e3a6a4c
854 96ee1d7354c4ad7372047672c36a1f561e3a6a4c
855 c4dab0c2fd337eb9191f80c3024830a4889a8f34
855 c4dab0c2fd337eb9191f80c3024830a4889a8f34
856 f63cc8fe54e4d326f8d692805d70e092f851ddb1
856 f63cc8fe54e4d326f8d692805d70e092f851ddb1
857 40f0358cb314c824a5929ee527308d90e023bc10
857 40f0358cb314c824a5929ee527308d90e023bc10
858 hgtagsfnodes -- {} (mandatory: True)
858 hgtagsfnodes -- {} (mandatory: True)
859 cache:rev-branch-cache -- {} (mandatory: False)
859 cache:rev-branch-cache -- {} (mandatory: False)
860
860
861 Check that local clone includes cache data
861 Check that local clone includes cache data
862
862
863 $ hg clone tagsclient tags-local-clone
863 $ hg clone tagsclient tags-local-clone
864 updating to branch default
864 updating to branch default
865 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
865 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
866 $ (cd tags-local-clone/.hg/cache/; ls -1 tag*)
866 $ (cd tags-local-clone/.hg/cache/; ls -1 tag*)
867 tags2
867 tags2
868 tags2-served
868 tags2-served
869 tags2-visible
869 tags2-visible
870
870
871 Avoid writing logs on trying to delete an already deleted tag
871 Avoid writing logs on trying to delete an already deleted tag
872 $ hg init issue5752
872 $ hg init issue5752
873 $ cd issue5752
873 $ cd issue5752
874 $ echo > a
874 $ echo > a
875 $ hg commit -Am 'add a'
875 $ hg commit -Am 'add a'
876 adding a
876 adding a
877 $ hg tag a
877 $ hg tag a
878 $ hg tags
878 $ hg tags
879 tip 1:bd7ee4f3939b
879 tip 1:bd7ee4f3939b
880 a 0:a8a82d372bb3
880 a 0:a8a82d372bb3
881 $ hg log
881 $ hg log
882 changeset: 1:bd7ee4f3939b
882 changeset: 1:bd7ee4f3939b
883 tag: tip
883 tag: tip
884 user: test
884 user: test
885 date: Thu Jan 01 00:00:00 1970 +0000
885 date: Thu Jan 01 00:00:00 1970 +0000
886 summary: Added tag a for changeset a8a82d372bb3
886 summary: Added tag a for changeset a8a82d372bb3
887
887
888 changeset: 0:a8a82d372bb3
888 changeset: 0:a8a82d372bb3
889 tag: a
889 tag: a
890 user: test
890 user: test
891 date: Thu Jan 01 00:00:00 1970 +0000
891 date: Thu Jan 01 00:00:00 1970 +0000
892 summary: add a
892 summary: add a
893
893
894 $ hg tag --remove a
894 $ hg tag --remove a
895 $ hg log
895 $ hg log
896 changeset: 2:e7feacc7ec9e
896 changeset: 2:e7feacc7ec9e
897 tag: tip
897 tag: tip
898 user: test
898 user: test
899 date: Thu Jan 01 00:00:00 1970 +0000
899 date: Thu Jan 01 00:00:00 1970 +0000
900 summary: Removed tag a
900 summary: Removed tag a
901
901
902 changeset: 1:bd7ee4f3939b
902 changeset: 1:bd7ee4f3939b
903 user: test
903 user: test
904 date: Thu Jan 01 00:00:00 1970 +0000
904 date: Thu Jan 01 00:00:00 1970 +0000
905 summary: Added tag a for changeset a8a82d372bb3
905 summary: Added tag a for changeset a8a82d372bb3
906
906
907 changeset: 0:a8a82d372bb3
907 changeset: 0:a8a82d372bb3
908 user: test
908 user: test
909 date: Thu Jan 01 00:00:00 1970 +0000
909 date: Thu Jan 01 00:00:00 1970 +0000
910 summary: add a
910 summary: add a
911
911
912 $ hg tag --remove a
912 $ hg tag --remove a
913 abort: tag 'a' is already removed
913 abort: tag 'a' is already removed
914 [10]
914 [10]
915 $ hg log
915 $ hg log
916 changeset: 2:e7feacc7ec9e
916 changeset: 2:e7feacc7ec9e
917 tag: tip
917 tag: tip
918 user: test
918 user: test
919 date: Thu Jan 01 00:00:00 1970 +0000
919 date: Thu Jan 01 00:00:00 1970 +0000
920 summary: Removed tag a
920 summary: Removed tag a
921
921
922 changeset: 1:bd7ee4f3939b
922 changeset: 1:bd7ee4f3939b
923 user: test
923 user: test
924 date: Thu Jan 01 00:00:00 1970 +0000
924 date: Thu Jan 01 00:00:00 1970 +0000
925 summary: Added tag a for changeset a8a82d372bb3
925 summary: Added tag a for changeset a8a82d372bb3
926
926
927 changeset: 0:a8a82d372bb3
927 changeset: 0:a8a82d372bb3
928 user: test
928 user: test
929 date: Thu Jan 01 00:00:00 1970 +0000
929 date: Thu Jan 01 00:00:00 1970 +0000
930 summary: add a
930 summary: add a
931
931
932 $ cat .hgtags
932 $ cat .hgtags
933 a8a82d372bb35b42ff736e74f07c23bcd99c371f a
933 a8a82d372bb35b42ff736e74f07c23bcd99c371f a
934 a8a82d372bb35b42ff736e74f07c23bcd99c371f a
934 a8a82d372bb35b42ff736e74f07c23bcd99c371f a
935 0000000000000000000000000000000000000000 a
935 0000000000000000000000000000000000000000 a
@@ -1,903 +1,903 b''
1 $ cat << EOF >> $HGRCPATH
1 $ cat << EOF >> $HGRCPATH
2 > [ui]
2 > [ui]
3 > ssh="$PYTHON" "$TESTDIR/dummyssh"
3 > ssh="$PYTHON" "$TESTDIR/dummyssh"
4 > EOF
4 > EOF
5
5
6 Set up repo
6 Set up repo
7
7
8 $ hg --config experimental.treemanifest=True init repo
8 $ hg --config experimental.treemanifest=True init repo
9 $ cd repo
9 $ cd repo
10
10
11 Requirements get set on init
11 Requirements get set on init
12
12
13 $ grep treemanifest .hg/requires
13 $ grep treemanifest .hg/requires
14 treemanifest
14 treemanifest
15
15
16 Without directories, looks like any other repo
16 Without directories, looks like any other repo
17
17
18 $ echo 0 > a
18 $ echo 0 > a
19 $ echo 0 > b
19 $ echo 0 > b
20 $ hg ci -Aqm initial
20 $ hg ci -Aqm initial
21 $ hg debugdata -m 0
21 $ hg debugdata -m 0
22 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
22 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
23 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
23 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
24
24
25 Submanifest is stored in separate revlog
25 Submanifest is stored in separate revlog
26
26
27 $ mkdir dir1
27 $ mkdir dir1
28 $ echo 1 > dir1/a
28 $ echo 1 > dir1/a
29 $ echo 1 > dir1/b
29 $ echo 1 > dir1/b
30 $ echo 1 > e
30 $ echo 1 > e
31 $ hg ci -Aqm 'add dir1'
31 $ hg ci -Aqm 'add dir1'
32 $ hg debugdata -m 1
32 $ hg debugdata -m 1
33 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
33 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
34 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
34 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
35 dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc)
35 dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc)
36 e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
36 e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
37 $ hg debugdata --dir dir1 0
37 $ hg debugdata --dir dir1 0
38 a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
38 a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
39 b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
39 b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
40
40
41 Can add nested directories
41 Can add nested directories
42
42
43 $ mkdir dir1/dir1
43 $ mkdir dir1/dir1
44 $ echo 2 > dir1/dir1/a
44 $ echo 2 > dir1/dir1/a
45 $ echo 2 > dir1/dir1/b
45 $ echo 2 > dir1/dir1/b
46 $ mkdir dir1/dir2
46 $ mkdir dir1/dir2
47 $ echo 2 > dir1/dir2/a
47 $ echo 2 > dir1/dir2/a
48 $ echo 2 > dir1/dir2/b
48 $ echo 2 > dir1/dir2/b
49 $ hg ci -Aqm 'add dir1/dir1'
49 $ hg ci -Aqm 'add dir1/dir1'
50 $ hg files -r .
50 $ hg files -r .
51 a
51 a
52 b
52 b
53 dir1/a
53 dir1/a
54 dir1/b
54 dir1/b
55 dir1/dir1/a
55 dir1/dir1/a
56 dir1/dir1/b
56 dir1/dir1/b
57 dir1/dir2/a
57 dir1/dir2/a
58 dir1/dir2/b
58 dir1/dir2/b
59 e
59 e
60
60
61 The manifest command works
61 The manifest command works
62
62
63 $ hg manifest
63 $ hg manifest
64 a
64 a
65 b
65 b
66 dir1/a
66 dir1/a
67 dir1/b
67 dir1/b
68 dir1/dir1/a
68 dir1/dir1/a
69 dir1/dir1/b
69 dir1/dir1/b
70 dir1/dir2/a
70 dir1/dir2/a
71 dir1/dir2/b
71 dir1/dir2/b
72 e
72 e
73
73
74 Revision is not created for unchanged directory
74 Revision is not created for unchanged directory
75
75
76 $ mkdir dir2
76 $ mkdir dir2
77 $ echo 3 > dir2/a
77 $ echo 3 > dir2/a
78 $ hg add dir2
78 $ hg add dir2
79 adding dir2/a
79 adding dir2/a
80 $ hg debugindex --dir dir1 > before
80 $ hg debugindex --dir dir1 > before
81 $ hg ci -qm 'add dir2'
81 $ hg ci -qm 'add dir2'
82 $ hg debugindex --dir dir1 > after
82 $ hg debugindex --dir dir1 > after
83 $ diff before after
83 $ diff before after
84 $ rm before after
84 $ rm before after
85
85
86 Removing directory does not create an revlog entry
86 Removing directory does not create an revlog entry
87
87
88 $ hg rm dir1/dir1
88 $ hg rm dir1/dir1
89 removing dir1/dir1/a
89 removing dir1/dir1/a
90 removing dir1/dir1/b
90 removing dir1/dir1/b
91 $ hg debugindex --dir dir1/dir1 > before
91 $ hg debugindex --dir dir1/dir1 > before
92 $ hg ci -qm 'remove dir1/dir1'
92 $ hg ci -qm 'remove dir1/dir1'
93 $ hg debugindex --dir dir1/dir1 > after
93 $ hg debugindex --dir dir1/dir1 > after
94 $ diff before after
94 $ diff before after
95 $ rm before after
95 $ rm before after
96
96
97 Check that hg files (calls treemanifest.walk()) works
97 Check that hg files (calls treemanifest.walk()) works
98 without loading all directory revlogs
98 without loading all directory revlogs
99
99
100 $ hg co 'desc("add dir2")'
100 $ hg co 'desc("add dir2")'
101 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
101 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
102 $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup
102 $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup
103 $ hg files -r . dir1
103 $ hg files -r . dir1
104 dir1/a
104 dir1/a
105 dir1/b
105 dir1/b
106 dir1/dir1/a
106 dir1/dir1/a
107 dir1/dir1/b
107 dir1/dir1/b
108 dir1/dir2/a
108 dir1/dir2/a
109 dir1/dir2/b
109 dir1/dir2/b
110
110
111 Check that status between revisions works (calls treemanifest.matches())
111 Check that status between revisions works (calls treemanifest.matches())
112 without loading all directory revlogs
112 without loading all directory revlogs
113
113
114 $ hg status --rev 'desc("add dir1")' --rev . dir1
114 $ hg status --rev 'desc("add dir1")' --rev . dir1
115 A dir1/dir1/a
115 A dir1/dir1/a
116 A dir1/dir1/b
116 A dir1/dir1/b
117 A dir1/dir2/a
117 A dir1/dir2/a
118 A dir1/dir2/b
118 A dir1/dir2/b
119 $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2
119 $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2
120
120
121 Merge creates 2-parent revision of directory revlog
121 Merge creates 2-parent revision of directory revlog
122
122
123 $ echo 5 > dir1/a
123 $ echo 5 > dir1/a
124 $ hg ci -Aqm 'modify dir1/a'
124 $ hg ci -Aqm 'modify dir1/a'
125 $ hg co '.^'
125 $ hg co '.^'
126 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
126 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
127 $ echo 6 > dir1/b
127 $ echo 6 > dir1/b
128 $ hg ci -Aqm 'modify dir1/b'
128 $ hg ci -Aqm 'modify dir1/b'
129 $ hg merge 'desc("modify dir1/a")'
129 $ hg merge 'desc("modify dir1/a")'
130 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
130 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
131 (branch merge, don't forget to commit)
131 (branch merge, don't forget to commit)
132 $ hg ci -m 'conflict-free merge involving dir1/'
132 $ hg ci -m 'conflict-free merge involving dir1/'
133 $ cat dir1/a
133 $ cat dir1/a
134 5
134 5
135 $ cat dir1/b
135 $ cat dir1/b
136 6
136 6
137 $ hg debugindex --dir dir1
137 $ hg debugindex --dir dir1
138 rev linkrev nodeid p1 p2
138 rev linkrev nodeid p1 p2
139 0 1 8b3ffd73f901 000000000000 000000000000
139 0 1 8b3ffd73f901 000000000000 000000000000
140 1 2 68e9d057c5a8 8b3ffd73f901 000000000000
140 1 2 68e9d057c5a8 8b3ffd73f901 000000000000
141 2 4 4698198d2624 68e9d057c5a8 000000000000
141 2 4 4698198d2624 68e9d057c5a8 000000000000
142 3 5 44844058ccce 68e9d057c5a8 000000000000
142 3 5 44844058ccce 68e9d057c5a8 000000000000
143 4 6 bf3d9b744927 68e9d057c5a8 000000000000
143 4 6 bf3d9b744927 68e9d057c5a8 000000000000
144 5 7 dde7c0af2a03 bf3d9b744927 44844058ccce
144 5 7 dde7c0af2a03 bf3d9b744927 44844058ccce
145
145
146 Merge keeping directory from parent 1 does not create revlog entry. (Note that
146 Merge keeping directory from parent 1 does not create revlog entry. (Note that
147 dir1's manifest does change, but only because dir1/a's filelog changes.)
147 dir1's manifest does change, but only because dir1/a's filelog changes.)
148
148
149 $ hg co 'desc("add dir2")'
149 $ hg co 'desc("add dir2")'
150 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
150 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 $ echo 8 > dir2/a
151 $ echo 8 > dir2/a
152 $ hg ci -m 'modify dir2/a'
152 $ hg ci -m 'modify dir2/a'
153 created new head
153 created new head
154
154
155 $ hg debugindex --dir dir2 > before
155 $ hg debugindex --dir dir2 > before
156 $ hg merge 'desc("modify dir1/a")'
156 $ hg merge 'desc("modify dir1/a")'
157 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
157 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
158 (branch merge, don't forget to commit)
158 (branch merge, don't forget to commit)
159 $ hg revert -r 'desc("modify dir2/a")' .
159 $ hg revert -r 'desc("modify dir2/a")' .
160 reverting dir1/a
160 reverting dir1/a
161 $ hg ci -m 'merge, keeping parent 1'
161 $ hg ci -m 'merge, keeping parent 1'
162 $ hg debugindex --dir dir2 > after
162 $ hg debugindex --dir dir2 > after
163 $ diff before after
163 $ diff before after
164 $ rm before after
164 $ rm before after
165
165
166 Merge keeping directory from parent 2 does not create revlog entry. (Note that
166 Merge keeping directory from parent 2 does not create revlog entry. (Note that
167 dir2's manifest does change, but only because dir2/a's filelog changes.)
167 dir2's manifest does change, but only because dir2/a's filelog changes.)
168
168
169 $ hg co 'desc("modify dir2/a")'
169 $ hg co 'desc("modify dir2/a")'
170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
171 $ hg debugindex --dir dir1 > before
171 $ hg debugindex --dir dir1 > before
172 $ hg merge 'desc("modify dir1/a")'
172 $ hg merge 'desc("modify dir1/a")'
173 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
173 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
174 (branch merge, don't forget to commit)
174 (branch merge, don't forget to commit)
175 $ hg revert -r 'desc("modify dir1/a")' .
175 $ hg revert -r 'desc("modify dir1/a")' .
176 reverting dir2/a
176 reverting dir2/a
177 $ hg ci -m 'merge, keeping parent 2'
177 $ hg ci -m 'merge, keeping parent 2'
178 created new head
178 created new head
179 $ hg debugindex --dir dir1 > after
179 $ hg debugindex --dir dir1 > after
180 $ diff before after
180 $ diff before after
181 $ rm before after
181 $ rm before after
182
182
183 Create flat source repo for tests with mixed flat/tree manifests
183 Create flat source repo for tests with mixed flat/tree manifests
184
184
185 $ cd ..
185 $ cd ..
186 $ hg init repo-flat
186 $ hg init repo-flat
187 $ cd repo-flat
187 $ cd repo-flat
188
188
189 Create a few commits with flat manifest
189 Create a few commits with flat manifest
190
190
191 $ echo 0 > a
191 $ echo 0 > a
192 $ echo 0 > b
192 $ echo 0 > b
193 $ echo 0 > e
193 $ echo 0 > e
194 $ for d in dir1 dir1/dir1 dir1/dir2 dir2
194 $ for d in dir1 dir1/dir1 dir1/dir2 dir2
195 > do
195 > do
196 > mkdir $d
196 > mkdir $d
197 > echo 0 > $d/a
197 > echo 0 > $d/a
198 > echo 0 > $d/b
198 > echo 0 > $d/b
199 > done
199 > done
200 $ hg ci -Aqm initial
200 $ hg ci -Aqm initial
201
201
202 $ echo 1 > a
202 $ echo 1 > a
203 $ echo 1 > dir1/a
203 $ echo 1 > dir1/a
204 $ echo 1 > dir1/dir1/a
204 $ echo 1 > dir1/dir1/a
205 $ hg ci -Aqm 'modify on branch 1'
205 $ hg ci -Aqm 'modify on branch 1'
206
206
207 $ hg co 0
207 $ hg co 0
208 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
208 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
209 $ echo 2 > b
209 $ echo 2 > b
210 $ echo 2 > dir1/b
210 $ echo 2 > dir1/b
211 $ echo 2 > dir1/dir1/b
211 $ echo 2 > dir1/dir1/b
212 $ hg ci -Aqm 'modify on branch 2'
212 $ hg ci -Aqm 'modify on branch 2'
213
213
214 $ hg merge 1
214 $ hg merge 1
215 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
215 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
216 (branch merge, don't forget to commit)
216 (branch merge, don't forget to commit)
217 $ hg ci -m 'merge of flat manifests to new flat manifest'
217 $ hg ci -m 'merge of flat manifests to new flat manifest'
218
218
219 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
219 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
220 $ cat hg.pid >> $DAEMON_PIDS
220 $ cat hg.pid >> $DAEMON_PIDS
221
221
222 Create clone with tree manifests enabled
222 Create clone with tree manifests enabled
223
223
224 $ cd ..
224 $ cd ..
225 $ hg clone --config experimental.treemanifest=1 \
225 $ hg clone --config experimental.treemanifest=1 \
226 > http://localhost:$HGPORT repo-mixed -r 1
226 > http://localhost:$HGPORT repo-mixed -r 1
227 adding changesets
227 adding changesets
228 adding manifests
228 adding manifests
229 adding file changes
229 adding file changes
230 added 2 changesets with 14 changes to 11 files
230 added 2 changesets with 14 changes to 11 files
231 new changesets 5b02a3e8db7e:581ef6037d8b
231 new changesets 5b02a3e8db7e:581ef6037d8b
232 updating to branch default
232 updating to branch default
233 11 files updated, 0 files merged, 0 files removed, 0 files unresolved
233 11 files updated, 0 files merged, 0 files removed, 0 files unresolved
234 $ cd repo-mixed
234 $ cd repo-mixed
235 $ test -d .hg/store/meta
235 $ test -d .hg/store/meta
236 [1]
236 [1]
237 $ grep treemanifest .hg/requires
237 $ grep treemanifest .hg/requires
238 treemanifest
238 treemanifest
239
239
240 Should be possible to push updates from flat to tree manifest repo
240 Should be possible to push updates from flat to tree manifest repo
241
241
242 $ hg -R ../repo-flat push ssh://user@dummy/repo-mixed
242 $ hg -R ../repo-flat push ssh://user@dummy/repo-mixed
243 pushing to ssh://user@dummy/repo-mixed
243 pushing to ssh://user@dummy/repo-mixed
244 searching for changes
244 searching for changes
245 remote: adding changesets
245 remote: adding changesets
246 remote: adding manifests
246 remote: adding manifests
247 remote: adding file changes
247 remote: adding file changes
248 remote: added 2 changesets with 3 changes to 3 files
248 remote: added 2 changesets with 3 changes to 3 files
249
249
250 Commit should store revlog per directory
250 Commit should store revlog per directory
251
251
252 $ hg co 1
252 $ hg co 1
253 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
253 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
254 $ echo 3 > a
254 $ echo 3 > a
255 $ echo 3 > dir1/a
255 $ echo 3 > dir1/a
256 $ echo 3 > dir1/dir1/a
256 $ echo 3 > dir1/dir1/a
257 $ hg ci -m 'first tree'
257 $ hg ci -m 'first tree'
258 created new head
258 created new head
259 $ find .hg/store/meta | sort
259 $ find .hg/store/meta | sort
260 .hg/store/meta
260 .hg/store/meta
261 .hg/store/meta/dir1
261 .hg/store/meta/dir1
262 .hg/store/meta/dir1/00manifest.i
262 .hg/store/meta/dir1/00manifest.i
263 .hg/store/meta/dir1/dir1
263 .hg/store/meta/dir1/dir1
264 .hg/store/meta/dir1/dir1/00manifest.i
264 .hg/store/meta/dir1/dir1/00manifest.i
265 .hg/store/meta/dir1/dir2
265 .hg/store/meta/dir1/dir2
266 .hg/store/meta/dir1/dir2/00manifest.i
266 .hg/store/meta/dir1/dir2/00manifest.i
267 .hg/store/meta/dir2
267 .hg/store/meta/dir2
268 .hg/store/meta/dir2/00manifest.i
268 .hg/store/meta/dir2/00manifest.i
269
269
270 Merge of two trees
270 Merge of two trees
271
271
272 $ hg co 2
272 $ hg co 2
273 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
273 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
274 $ hg merge 1
274 $ hg merge 1
275 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
275 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
276 (branch merge, don't forget to commit)
276 (branch merge, don't forget to commit)
277 $ hg ci -m 'merge of flat manifests to new tree manifest'
277 $ hg ci -m 'merge of flat manifests to new tree manifest'
278 created new head
278 created new head
279 $ hg diff -r 3
279 $ hg diff -r 3
280
280
281 Parent of tree root manifest should be flat manifest, and two for merge
281 Parent of tree root manifest should be flat manifest, and two for merge
282
282
283 $ hg debugindex -m
283 $ hg debugindex -m
284 rev linkrev nodeid p1 p2
284 rev linkrev nodeid p1 p2
285 0 0 40536115ed9e 000000000000 000000000000
285 0 0 40536115ed9e 000000000000 000000000000
286 1 1 f3376063c255 40536115ed9e 000000000000
286 1 1 f3376063c255 40536115ed9e 000000000000
287 2 2 5d9b9da231a2 40536115ed9e 000000000000
287 2 2 5d9b9da231a2 40536115ed9e 000000000000
288 3 3 d17d663cbd8a 5d9b9da231a2 f3376063c255
288 3 3 d17d663cbd8a 5d9b9da231a2 f3376063c255
289 4 4 51e32a8c60ee f3376063c255 000000000000
289 4 4 51e32a8c60ee f3376063c255 000000000000
290 5 5 cc5baa78b230 5d9b9da231a2 f3376063c255
290 5 5 cc5baa78b230 5d9b9da231a2 f3376063c255
291
291
292
292
293 Status across flat/tree boundary should work
293 Status across flat/tree boundary should work
294
294
295 $ hg status --rev '.^' --rev .
295 $ hg status --rev '.^' --rev .
296 M a
296 M a
297 M dir1/a
297 M dir1/a
298 M dir1/dir1/a
298 M dir1/dir1/a
299
299
300
300
301 Turning off treemanifest config has no effect
301 Turning off treemanifest config has no effect
302
302
303 $ hg debugindex --dir dir1
303 $ hg debugindex --dir dir1
304 rev linkrev nodeid p1 p2
304 rev linkrev nodeid p1 p2
305 0 4 064927a0648a 000000000000 000000000000
305 0 4 064927a0648a 000000000000 000000000000
306 1 5 25ecb8cb8618 000000000000 000000000000
306 1 5 25ecb8cb8618 000000000000 000000000000
307 $ echo 2 > dir1/a
307 $ echo 2 > dir1/a
308 $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a'
308 $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a'
309 $ hg debugindex --dir dir1
309 $ hg debugindex --dir dir1
310 rev linkrev nodeid p1 p2
310 rev linkrev nodeid p1 p2
311 0 4 064927a0648a 000000000000 000000000000
311 0 4 064927a0648a 000000000000 000000000000
312 1 5 25ecb8cb8618 000000000000 000000000000
312 1 5 25ecb8cb8618 000000000000 000000000000
313 2 6 5b16163a30c6 25ecb8cb8618 000000000000
313 2 6 5b16163a30c6 25ecb8cb8618 000000000000
314
314
315 Stripping and recovering changes should work
315 Stripping and recovering changes should work
316
316
317 $ hg st --change tip
317 $ hg st --change tip
318 M dir1/a
318 M dir1/a
319 $ hg --config extensions.strip= strip tip
319 $ hg --config extensions.strip= strip tip
320 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
320 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
321 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg
321 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg
322 $ hg debugindex --dir dir1
322 $ hg debugindex --dir dir1
323 rev linkrev nodeid p1 p2
323 rev linkrev nodeid p1 p2
324 0 4 064927a0648a 000000000000 000000000000
324 0 4 064927a0648a 000000000000 000000000000
325 1 5 25ecb8cb8618 000000000000 000000000000
325 1 5 25ecb8cb8618 000000000000 000000000000
326
326
327 #if repobundlerepo
327 #if repobundlerepo
328 $ hg incoming .hg/strip-backup/*
328 $ hg incoming .hg/strip-backup/*
329 comparing with .hg/strip-backup/*-backup.hg (glob)
329 comparing with .hg/strip-backup/*-backup.hg (glob)
330 searching for changes
330 searching for changes
331 changeset: 6:51cfd7b1e13b
331 changeset: 6:51cfd7b1e13b
332 tag: tip
332 tag: tip
333 user: test
333 user: test
334 date: Thu Jan 01 00:00:00 1970 +0000
334 date: Thu Jan 01 00:00:00 1970 +0000
335 summary: modify dir1/a
335 summary: modify dir1/a
336
336
337 #endif
337 #endif
338
338
339 $ hg unbundle .hg/strip-backup/*
339 $ hg unbundle .hg/strip-backup/*
340 adding changesets
340 adding changesets
341 adding manifests
341 adding manifests
342 adding file changes
342 adding file changes
343 added 1 changesets with 1 changes to 1 files
343 added 1 changesets with 1 changes to 1 files
344 new changesets 51cfd7b1e13b (1 drafts)
344 new changesets 51cfd7b1e13b (1 drafts)
345 (run 'hg update' to get a working copy)
345 (run 'hg update' to get a working copy)
346 $ hg --config extensions.strip= strip tip
346 $ hg --config extensions.strip= strip tip
347 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/*-backup.hg (glob)
347 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/*-backup.hg (glob)
348 $ hg unbundle -q .hg/strip-backup/*
348 $ hg unbundle -q .hg/strip-backup/*
349 $ hg debugindex --dir dir1
349 $ hg debugindex --dir dir1
350 rev linkrev nodeid p1 p2
350 rev linkrev nodeid p1 p2
351 0 4 064927a0648a 000000000000 000000000000
351 0 4 064927a0648a 000000000000 000000000000
352 1 5 25ecb8cb8618 000000000000 000000000000
352 1 5 25ecb8cb8618 000000000000 000000000000
353 2 6 5b16163a30c6 25ecb8cb8618 000000000000
353 2 6 5b16163a30c6 25ecb8cb8618 000000000000
354 $ hg st --change tip
354 $ hg st --change tip
355 M dir1/a
355 M dir1/a
356
356
357 Shelving and unshelving should work
357 Shelving and unshelving should work
358
358
359 $ echo foo >> dir1/a
359 $ echo foo >> dir1/a
360 $ hg shelve
360 $ hg shelve
361 shelved as default
361 shelved as default
362 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
362 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
363 $ hg unshelve
363 $ hg unshelve
364 unshelving change 'default'
364 unshelving change 'default'
365 $ hg diff --nodates
365 $ hg diff --nodates
366 diff -r 708a273da119 dir1/a
366 diff -r 708a273da119 dir1/a
367 --- a/dir1/a
367 --- a/dir1/a
368 +++ b/dir1/a
368 +++ b/dir1/a
369 @@ -1,1 +1,2 @@
369 @@ -1,1 +1,2 @@
370 1
370 1
371 +foo
371 +foo
372
372
373 Pushing from treemanifest repo to an empty repo makes that a treemanifest repo
373 Pushing from treemanifest repo to an empty repo makes that a treemanifest repo
374
374
375 $ cd ..
375 $ cd ..
376 $ hg init empty-repo
376 $ hg init empty-repo
377 $ cat << EOF >> empty-repo/.hg/hgrc
377 $ cat << EOF >> empty-repo/.hg/hgrc
378 > [experimental]
378 > [experimental]
379 > changegroup3=yes
379 > changegroup3=yes
380 > EOF
380 > EOF
381 $ grep treemanifest empty-repo/.hg/requires
381 $ grep treemanifest empty-repo/.hg/requires
382 [1]
382 [1]
383 $ hg push -R repo -r 0 empty-repo
383 $ hg push -R repo -r 0 empty-repo
384 pushing to empty-repo
384 pushing to empty-repo
385 searching for changes
385 searching for changes
386 adding changesets
386 adding changesets
387 adding manifests
387 adding manifests
388 adding file changes
388 adding file changes
389 added 1 changesets with 2 changes to 2 files
389 added 1 changesets with 2 changes to 2 files
390 $ grep treemanifest empty-repo/.hg/requires
390 $ grep treemanifest empty-repo/.hg/requires
391 treemanifest
391 treemanifest
392
392
393 Pushing to an empty repo works
393 Pushing to an empty repo works
394
394
395 $ hg --config experimental.treemanifest=1 init clone
395 $ hg --config experimental.treemanifest=1 init clone
396 $ grep treemanifest clone/.hg/requires
396 $ grep treemanifest clone/.hg/requires
397 treemanifest
397 treemanifest
398 $ hg push -R repo clone
398 $ hg push -R repo clone
399 pushing to clone
399 pushing to clone
400 searching for changes
400 searching for changes
401 adding changesets
401 adding changesets
402 adding manifests
402 adding manifests
403 adding file changes
403 adding file changes
404 added 11 changesets with 15 changes to 10 files (+3 heads)
404 added 11 changesets with 15 changes to 10 files (+3 heads)
405 $ grep treemanifest clone/.hg/requires
405 $ grep treemanifest clone/.hg/requires
406 treemanifest
406 treemanifest
407 $ hg -R clone verify
407 $ hg -R clone verify
408 checking changesets
408 checking changesets
409 checking manifests
409 checking manifests
410 checking directory manifests
410 checking directory manifests
411 crosschecking files in changesets and manifests
411 crosschecking files in changesets and manifests
412 checking files
412 checking files
413 checked 11 changesets with 15 changes to 10 files
413 checked 11 changesets with 15 changes to 10 files
414
414
415 Create deeper repo with tree manifests.
415 Create deeper repo with tree manifests.
416
416
417 $ hg --config experimental.treemanifest=True init deeprepo
417 $ hg --config experimental.treemanifest=True init deeprepo
418 $ cd deeprepo
418 $ cd deeprepo
419
419
420 $ mkdir .A
420 $ mkdir .A
421 $ mkdir b
421 $ mkdir b
422 $ mkdir b/bar
422 $ mkdir b/bar
423 $ mkdir b/bar/orange
423 $ mkdir b/bar/orange
424 $ mkdir b/bar/orange/fly
424 $ mkdir b/bar/orange/fly
425 $ mkdir b/foo
425 $ mkdir b/foo
426 $ mkdir b/foo/apple
426 $ mkdir b/foo/apple
427 $ mkdir b/foo/apple/bees
427 $ mkdir b/foo/apple/bees
428
428
429 $ touch .A/one.txt
429 $ touch .A/one.txt
430 $ touch .A/two.txt
430 $ touch .A/two.txt
431 $ touch b/bar/fruits.txt
431 $ touch b/bar/fruits.txt
432 $ touch b/bar/orange/fly/gnat.py
432 $ touch b/bar/orange/fly/gnat.py
433 $ touch b/bar/orange/fly/housefly.txt
433 $ touch b/bar/orange/fly/housefly.txt
434 $ touch b/foo/apple/bees/flower.py
434 $ touch b/foo/apple/bees/flower.py
435 $ touch c.txt
435 $ touch c.txt
436 $ touch d.py
436 $ touch d.py
437
437
438 $ hg ci -Aqm 'initial'
438 $ hg ci -Aqm 'initial'
439
439
440 $ echo >> .A/one.txt
440 $ echo >> .A/one.txt
441 $ echo >> .A/two.txt
441 $ echo >> .A/two.txt
442 $ echo >> b/bar/fruits.txt
442 $ echo >> b/bar/fruits.txt
443 $ echo >> b/bar/orange/fly/gnat.py
443 $ echo >> b/bar/orange/fly/gnat.py
444 $ echo >> b/bar/orange/fly/housefly.txt
444 $ echo >> b/bar/orange/fly/housefly.txt
445 $ echo >> b/foo/apple/bees/flower.py
445 $ echo >> b/foo/apple/bees/flower.py
446 $ echo >> c.txt
446 $ echo >> c.txt
447 $ echo >> d.py
447 $ echo >> d.py
448 $ hg ci -Aqm 'second'
448 $ hg ci -Aqm 'second'
449
449
450 We'll see that visitdir works by removing some treemanifest revlogs and running
450 We'll see that visitdir works by removing some treemanifest revlogs and running
451 the files command with various parameters.
451 the files command with various parameters.
452
452
453 Test files from the root.
453 Test files from the root.
454
454
455 $ hg files -r .
455 $ hg files -r .
456 .A/one.txt
456 .A/one.txt
457 .A/two.txt
457 .A/two.txt
458 b/bar/fruits.txt
458 b/bar/fruits.txt
459 b/bar/orange/fly/gnat.py
459 b/bar/orange/fly/gnat.py
460 b/bar/orange/fly/housefly.txt
460 b/bar/orange/fly/housefly.txt
461 b/foo/apple/bees/flower.py
461 b/foo/apple/bees/flower.py
462 c.txt
462 c.txt
463 d.py
463 d.py
464
464
465 Excludes with a glob should not exclude everything from the glob's root
465 Excludes with a glob should not exclude everything from the glob's root
466
466
467 $ hg files -r . -X 'b/fo?' b
467 $ hg files -r . -X 'b/fo?' b
468 b/bar/fruits.txt
468 b/bar/fruits.txt
469 b/bar/orange/fly/gnat.py
469 b/bar/orange/fly/gnat.py
470 b/bar/orange/fly/housefly.txt
470 b/bar/orange/fly/housefly.txt
471 $ cp -R .hg/store .hg/store-copy
471 $ cp -R .hg/store .hg/store-copy
472
472
473 Test files for a subdirectory.
473 Test files for a subdirectory.
474
474
475 #if reporevlogstore
475 #if reporevlogstore
476 $ rm -r .hg/store/meta/~2e_a
476 $ rm -r .hg/store/meta/~2e_a
477 #endif
477 #endif
478 #if reposimplestore
478 #if reposimplestore
479 $ rm -r .hg/store/meta/._a
479 $ rm -r .hg/store/meta/._a
480 #endif
480 #endif
481 $ hg files -r . b
481 $ hg files -r . b
482 b/bar/fruits.txt
482 b/bar/fruits.txt
483 b/bar/orange/fly/gnat.py
483 b/bar/orange/fly/gnat.py
484 b/bar/orange/fly/housefly.txt
484 b/bar/orange/fly/housefly.txt
485 b/foo/apple/bees/flower.py
485 b/foo/apple/bees/flower.py
486 $ hg diff -r '.^' -r . --stat b
486 $ hg diff -r '.^' -r . --stat b
487 b/bar/fruits.txt | 1 +
487 b/bar/fruits.txt | 1 +
488 b/bar/orange/fly/gnat.py | 1 +
488 b/bar/orange/fly/gnat.py | 1 +
489 b/bar/orange/fly/housefly.txt | 1 +
489 b/bar/orange/fly/housefly.txt | 1 +
490 b/foo/apple/bees/flower.py | 1 +
490 b/foo/apple/bees/flower.py | 1 +
491 4 files changed, 4 insertions(+), 0 deletions(-)
491 4 files changed, 4 insertions(+), 0 deletions(-)
492 $ cp -R .hg/store-copy/. .hg/store
492 $ cp -R .hg/store-copy/. .hg/store
493
493
494 Test files with just includes and excludes.
494 Test files with just includes and excludes.
495
495
496 #if reporevlogstore
496 #if reporevlogstore
497 $ rm -r .hg/store/meta/~2e_a
497 $ rm -r .hg/store/meta/~2e_a
498 #endif
498 #endif
499 #if reposimplestore
499 #if reposimplestore
500 $ rm -r .hg/store/meta/._a
500 $ rm -r .hg/store/meta/._a
501 #endif
501 #endif
502 $ rm -r .hg/store/meta/b/bar/orange/fly
502 $ rm -r .hg/store/meta/b/bar/orange/fly
503 $ rm -r .hg/store/meta/b/foo/apple/bees
503 $ rm -r .hg/store/meta/b/foo/apple/bees
504 $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
504 $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
505 b/bar/fruits.txt
505 b/bar/fruits.txt
506 $ hg diff -r '.^' -r . --stat -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
506 $ hg diff -r '.^' -r . --stat -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
507 b/bar/fruits.txt | 1 +
507 b/bar/fruits.txt | 1 +
508 1 files changed, 1 insertions(+), 0 deletions(-)
508 1 files changed, 1 insertions(+), 0 deletions(-)
509 $ cp -R .hg/store-copy/. .hg/store
509 $ cp -R .hg/store-copy/. .hg/store
510
510
511 Test files for a subdirectory, excluding a directory within it.
511 Test files for a subdirectory, excluding a directory within it.
512
512
513 #if reporevlogstore
513 #if reporevlogstore
514 $ rm -r .hg/store/meta/~2e_a
514 $ rm -r .hg/store/meta/~2e_a
515 #endif
515 #endif
516 #if reposimplestore
516 #if reposimplestore
517 $ rm -r .hg/store/meta/._a
517 $ rm -r .hg/store/meta/._a
518 #endif
518 #endif
519 $ rm -r .hg/store/meta/b/foo
519 $ rm -r .hg/store/meta/b/foo
520 $ hg files -r . -X path:b/foo b
520 $ hg files -r . -X path:b/foo b
521 b/bar/fruits.txt
521 b/bar/fruits.txt
522 b/bar/orange/fly/gnat.py
522 b/bar/orange/fly/gnat.py
523 b/bar/orange/fly/housefly.txt
523 b/bar/orange/fly/housefly.txt
524 $ hg diff -r '.^' -r . --stat -X path:b/foo b
524 $ hg diff -r '.^' -r . --stat -X path:b/foo b
525 b/bar/fruits.txt | 1 +
525 b/bar/fruits.txt | 1 +
526 b/bar/orange/fly/gnat.py | 1 +
526 b/bar/orange/fly/gnat.py | 1 +
527 b/bar/orange/fly/housefly.txt | 1 +
527 b/bar/orange/fly/housefly.txt | 1 +
528 3 files changed, 3 insertions(+), 0 deletions(-)
528 3 files changed, 3 insertions(+), 0 deletions(-)
529 $ cp -R .hg/store-copy/. .hg/store
529 $ cp -R .hg/store-copy/. .hg/store
530
530
531 Test files for a sub directory, including only a directory within it, and
531 Test files for a sub directory, including only a directory within it, and
532 including an unrelated directory.
532 including an unrelated directory.
533
533
534 #if reporevlogstore
534 #if reporevlogstore
535 $ rm -r .hg/store/meta/~2e_a
535 $ rm -r .hg/store/meta/~2e_a
536 #endif
536 #endif
537 #if reposimplestore
537 #if reposimplestore
538 $ rm -r .hg/store/meta/._a
538 $ rm -r .hg/store/meta/._a
539 #endif
539 #endif
540 $ rm -r .hg/store/meta/b/foo
540 $ rm -r .hg/store/meta/b/foo
541 $ hg files -r . -I path:b/bar/orange -I path:a b
541 $ hg files -r . -I path:b/bar/orange -I path:a b
542 b/bar/orange/fly/gnat.py
542 b/bar/orange/fly/gnat.py
543 b/bar/orange/fly/housefly.txt
543 b/bar/orange/fly/housefly.txt
544 $ hg diff -r '.^' -r . --stat -I path:b/bar/orange -I path:a b
544 $ hg diff -r '.^' -r . --stat -I path:b/bar/orange -I path:a b
545 b/bar/orange/fly/gnat.py | 1 +
545 b/bar/orange/fly/gnat.py | 1 +
546 b/bar/orange/fly/housefly.txt | 1 +
546 b/bar/orange/fly/housefly.txt | 1 +
547 2 files changed, 2 insertions(+), 0 deletions(-)
547 2 files changed, 2 insertions(+), 0 deletions(-)
548 $ cp -R .hg/store-copy/. .hg/store
548 $ cp -R .hg/store-copy/. .hg/store
549
549
550 Test files for a pattern, including a directory, and excluding a directory
550 Test files for a pattern, including a directory, and excluding a directory
551 within that.
551 within that.
552
552
553 #if reporevlogstore
553 #if reporevlogstore
554 $ rm -r .hg/store/meta/~2e_a
554 $ rm -r .hg/store/meta/~2e_a
555 #endif
555 #endif
556 #if reposimplestore
556 #if reposimplestore
557 $ rm -r .hg/store/meta/._a
557 $ rm -r .hg/store/meta/._a
558 #endif
558 #endif
559 $ rm -r .hg/store/meta/b/foo
559 $ rm -r .hg/store/meta/b/foo
560 $ rm -r .hg/store/meta/b/bar/orange
560 $ rm -r .hg/store/meta/b/bar/orange
561 $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
561 $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
562 b/bar/fruits.txt
562 b/bar/fruits.txt
563 $ hg diff -r '.^' -r . --stat glob:**.txt -I path:b/bar -X path:b/bar/orange
563 $ hg diff -r '.^' -r . --stat glob:**.txt -I path:b/bar -X path:b/bar/orange
564 b/bar/fruits.txt | 1 +
564 b/bar/fruits.txt | 1 +
565 1 files changed, 1 insertions(+), 0 deletions(-)
565 1 files changed, 1 insertions(+), 0 deletions(-)
566 $ cp -R .hg/store-copy/. .hg/store
566 $ cp -R .hg/store-copy/. .hg/store
567
567
568 Add some more changes to the deep repo
568 Add some more changes to the deep repo
569 $ echo narf >> b/bar/fruits.txt
569 $ echo narf >> b/bar/fruits.txt
570 $ hg ci -m narf
570 $ hg ci -m narf
571 $ echo troz >> b/bar/orange/fly/gnat.py
571 $ echo troz >> b/bar/orange/fly/gnat.py
572 $ hg ci -m troz
572 $ hg ci -m troz
573
573
574 Verify works
574 Verify works
575 $ hg verify
575 $ hg verify
576 checking changesets
576 checking changesets
577 checking manifests
577 checking manifests
578 checking directory manifests
578 checking directory manifests
579 crosschecking files in changesets and manifests
579 crosschecking files in changesets and manifests
580 checking files
580 checking files
581 checked 4 changesets with 18 changes to 8 files
581 checked 4 changesets with 18 changes to 8 files
582
582
583 #if repofncache
583 #if repofncache
584 Dirlogs are included in fncache
584 Dirlogs are included in fncache
585 $ grep meta/.A/00manifest.i .hg/store/fncache
585 $ grep meta/.A/00manifest.i .hg/store/fncache
586 meta/.A/00manifest.i
586 meta/.A/00manifest.i
587
587
588 Rebuilt fncache includes dirlogs
588 Rebuilt fncache includes dirlogs
589 $ rm .hg/store/fncache
589 $ rm .hg/store/fncache
590 $ hg debugrebuildfncache
590 $ hg debugrebuildfncache
591 adding data/.A/one.txt.i
591 adding data/.A/one.txt.i
592 adding data/.A/two.txt.i
592 adding data/.A/two.txt.i
593 adding data/b/bar/fruits.txt.i
593 adding data/b/bar/fruits.txt.i
594 adding data/b/bar/orange/fly/gnat.py.i
594 adding data/b/bar/orange/fly/gnat.py.i
595 adding data/b/bar/orange/fly/housefly.txt.i
595 adding data/b/bar/orange/fly/housefly.txt.i
596 adding data/b/foo/apple/bees/flower.py.i
596 adding data/b/foo/apple/bees/flower.py.i
597 adding data/c.txt.i
597 adding data/c.txt.i
598 adding data/d.py.i
598 adding data/d.py.i
599 adding meta/.A/00manifest.i
599 adding meta/.A/00manifest.i
600 adding meta/b/00manifest.i
600 adding meta/b/00manifest.i
601 adding meta/b/bar/00manifest.i
601 adding meta/b/bar/00manifest.i
602 adding meta/b/bar/orange/00manifest.i
602 adding meta/b/bar/orange/00manifest.i
603 adding meta/b/bar/orange/fly/00manifest.i
603 adding meta/b/bar/orange/fly/00manifest.i
604 adding meta/b/foo/00manifest.i
604 adding meta/b/foo/00manifest.i
605 adding meta/b/foo/apple/00manifest.i
605 adding meta/b/foo/apple/00manifest.i
606 adding meta/b/foo/apple/bees/00manifest.i
606 adding meta/b/foo/apple/bees/00manifest.i
607 16 items added, 0 removed from fncache
607 16 items added, 0 removed from fncache
608 #endif
608 #endif
609
609
610 Finish first server
610 Finish first server
611 $ killdaemons.py
611 $ killdaemons.py
612
612
613 Back up the recently added revlogs
613 Back up the recently added revlogs
614 $ cp -R .hg/store .hg/store-newcopy
614 $ cp -R .hg/store .hg/store-newcopy
615
615
616 Verify reports missing dirlog
616 Verify reports missing dirlog
617 $ rm .hg/store/meta/b/00manifest.*
617 $ rm .hg/store/meta/b/00manifest.*
618 $ hg verify
618 $ hg verify
619 checking changesets
619 checking changesets
620 checking manifests
620 checking manifests
621 checking directory manifests
621 checking directory manifests
622 0: empty or missing b/
622 0: empty or missing b/
623 b/@0: parent-directory manifest refers to unknown revision 67688a370455
623 b/@0: parent-directory manifest refers to unknown revision 67688a370455
624 b/@1: parent-directory manifest refers to unknown revision f065da70369e
624 b/@1: parent-directory manifest refers to unknown revision f065da70369e
625 b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b
625 b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b
626 b/@3: parent-directory manifest refers to unknown revision 367152e6af28
626 b/@3: parent-directory manifest refers to unknown revision 367152e6af28
627 warning: orphan data file 'meta/b/bar/00manifest.i' (reporevlogstore !)
627 warning: orphan data file 'meta/b/bar/00manifest.i' (reporevlogstore !)
628 warning: orphan data file 'meta/b/bar/orange/00manifest.i' (reporevlogstore !)
628 warning: orphan data file 'meta/b/bar/orange/00manifest.i' (reporevlogstore !)
629 warning: orphan data file 'meta/b/bar/orange/fly/00manifest.i' (reporevlogstore !)
629 warning: orphan data file 'meta/b/bar/orange/fly/00manifest.i' (reporevlogstore !)
630 warning: orphan data file 'meta/b/foo/00manifest.i' (reporevlogstore !)
630 warning: orphan data file 'meta/b/foo/00manifest.i' (reporevlogstore !)
631 warning: orphan data file 'meta/b/foo/apple/00manifest.i' (reporevlogstore !)
631 warning: orphan data file 'meta/b/foo/apple/00manifest.i' (reporevlogstore !)
632 warning: orphan data file 'meta/b/foo/apple/bees/00manifest.i' (reporevlogstore !)
632 warning: orphan data file 'meta/b/foo/apple/bees/00manifest.i' (reporevlogstore !)
633 crosschecking files in changesets and manifests
633 crosschecking files in changesets and manifests
634 b/bar/fruits.txt@0: in changeset but not in manifest
634 b/bar/fruits.txt@0: in changeset but not in manifest
635 b/bar/orange/fly/gnat.py@0: in changeset but not in manifest
635 b/bar/orange/fly/gnat.py@0: in changeset but not in manifest
636 b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest
636 b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest
637 b/foo/apple/bees/flower.py@0: in changeset but not in manifest
637 b/foo/apple/bees/flower.py@0: in changeset but not in manifest
638 checking files
638 checking files
639 checked 4 changesets with 18 changes to 8 files
639 checked 4 changesets with 18 changes to 8 files
640 6 warnings encountered! (reporevlogstore !)
640 6 warnings encountered! (reporevlogstore !)
641 9 integrity errors encountered!
641 9 integrity errors encountered!
642 (first damaged changeset appears to be 0)
642 (first damaged changeset appears to be 0)
643 [1]
643 [1]
644 $ cp -R .hg/store-newcopy/. .hg/store
644 $ cp -R .hg/store-newcopy/. .hg/store
645
645
646 Verify reports missing dirlog entry
646 Verify reports missing dirlog entry
647 $ mv -f .hg/store-copy/meta/b/00manifest.* .hg/store/meta/b/
647 $ mv -f .hg/store-copy/meta/b/00manifest.* .hg/store/meta/b/
648 $ hg verify
648 $ hg verify
649 checking changesets
649 checking changesets
650 checking manifests
650 checking manifests
651 checking directory manifests
651 checking directory manifests
652 b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b
652 b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b
653 b/@3: parent-directory manifest refers to unknown revision 367152e6af28
653 b/@3: parent-directory manifest refers to unknown revision 367152e6af28
654 b/bar/@?: rev 2 points to unexpected changeset 2
654 b/bar/@?: rev 2 points to unexpected changeset 2
655 b/bar/@?: 44d7e1146e0d not in parent-directory manifest
655 b/bar/@?: 44d7e1146e0d not in parent-directory manifest
656 b/bar/@?: rev 3 points to unexpected changeset 3
656 b/bar/@?: rev 3 points to unexpected changeset 3
657 b/bar/@?: 70b10c6b17b7 not in parent-directory manifest
657 b/bar/@?: 70b10c6b17b7 not in parent-directory manifest
658 b/bar/orange/@?: rev 2 points to unexpected changeset 3
658 b/bar/orange/@?: rev 2 points to unexpected changeset 3
659 (expected None)
659 (expected None)
660 b/bar/orange/fly/@?: rev 2 points to unexpected changeset 3
660 b/bar/orange/fly/@?: rev 2 points to unexpected changeset 3
661 (expected None)
661 (expected None)
662 crosschecking files in changesets and manifests
662 crosschecking files in changesets and manifests
663 checking files
663 checking files
664 checked 4 changesets with 18 changes to 8 files
664 checked 4 changesets with 18 changes to 8 files
665 2 warnings encountered!
665 2 warnings encountered!
666 8 integrity errors encountered!
666 8 integrity errors encountered!
667 (first damaged changeset appears to be 2)
667 (first damaged changeset appears to be 2)
668 [1]
668 [1]
669 $ cp -R .hg/store-newcopy/. .hg/store
669 $ cp -R .hg/store-newcopy/. .hg/store
670
670
671 Test cloning a treemanifest repo over http.
671 Test cloning a treemanifest repo over http.
672 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
672 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
673 $ cat hg.pid >> $DAEMON_PIDS
673 $ cat hg.pid >> $DAEMON_PIDS
674 $ cd ..
674 $ cd ..
675 We can clone even with the knob turned off and we'll get a treemanifest repo.
675 We can clone even with the knob turned off and we'll get a treemanifest repo.
676 $ hg clone --config experimental.treemanifest=False \
676 $ hg clone --config experimental.treemanifest=False \
677 > --config experimental.changegroup3=True \
677 > --config experimental.changegroup3=True \
678 > http://localhost:$HGPORT deepclone
678 > http://localhost:$HGPORT deepclone
679 requesting all changes
679 requesting all changes
680 adding changesets
680 adding changesets
681 adding manifests
681 adding manifests
682 adding file changes
682 adding file changes
683 added 4 changesets with 18 changes to 8 files
683 added 4 changesets with 18 changes to 8 files
684 new changesets 775704be6f52:523e5c631710
684 new changesets 775704be6f52:523e5c631710
685 updating to branch default
685 updating to branch default
686 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
686 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
687 No server errors.
687 No server errors.
688 $ cat deeprepo/errors.log
688 $ cat deeprepo/errors.log
689 requires got updated to include treemanifest
689 requires got updated to include treemanifest
690 $ cat deepclone/.hg/requires | grep treemanifest
690 $ cat deepclone/.hg/requires | grep treemanifest
691 treemanifest
691 treemanifest
692 Tree manifest revlogs exist.
692 Tree manifest revlogs exist.
693 $ find deepclone/.hg/store/meta | sort
693 $ find deepclone/.hg/store/meta | sort
694 deepclone/.hg/store/meta
694 deepclone/.hg/store/meta
695 deepclone/.hg/store/meta/._a (reposimplestore !)
695 deepclone/.hg/store/meta/._a (reposimplestore !)
696 deepclone/.hg/store/meta/._a/00manifest.i (reposimplestore !)
696 deepclone/.hg/store/meta/._a/00manifest.i (reposimplestore !)
697 deepclone/.hg/store/meta/b
697 deepclone/.hg/store/meta/b
698 deepclone/.hg/store/meta/b/00manifest.i
698 deepclone/.hg/store/meta/b/00manifest.i
699 deepclone/.hg/store/meta/b/bar
699 deepclone/.hg/store/meta/b/bar
700 deepclone/.hg/store/meta/b/bar/00manifest.i
700 deepclone/.hg/store/meta/b/bar/00manifest.i
701 deepclone/.hg/store/meta/b/bar/orange
701 deepclone/.hg/store/meta/b/bar/orange
702 deepclone/.hg/store/meta/b/bar/orange/00manifest.i
702 deepclone/.hg/store/meta/b/bar/orange/00manifest.i
703 deepclone/.hg/store/meta/b/bar/orange/fly
703 deepclone/.hg/store/meta/b/bar/orange/fly
704 deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i
704 deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i
705 deepclone/.hg/store/meta/b/foo
705 deepclone/.hg/store/meta/b/foo
706 deepclone/.hg/store/meta/b/foo/00manifest.i
706 deepclone/.hg/store/meta/b/foo/00manifest.i
707 deepclone/.hg/store/meta/b/foo/apple
707 deepclone/.hg/store/meta/b/foo/apple
708 deepclone/.hg/store/meta/b/foo/apple/00manifest.i
708 deepclone/.hg/store/meta/b/foo/apple/00manifest.i
709 deepclone/.hg/store/meta/b/foo/apple/bees
709 deepclone/.hg/store/meta/b/foo/apple/bees
710 deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i
710 deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i
711 deepclone/.hg/store/meta/~2e_a (reporevlogstore !)
711 deepclone/.hg/store/meta/~2e_a (reporevlogstore !)
712 deepclone/.hg/store/meta/~2e_a/00manifest.i (reporevlogstore !)
712 deepclone/.hg/store/meta/~2e_a/00manifest.i (reporevlogstore !)
713 Verify passes.
713 Verify passes.
714 $ cd deepclone
714 $ cd deepclone
715 $ hg verify
715 $ hg verify
716 checking changesets
716 checking changesets
717 checking manifests
717 checking manifests
718 checking directory manifests
718 checking directory manifests
719 crosschecking files in changesets and manifests
719 crosschecking files in changesets and manifests
720 checking files
720 checking files
721 checked 4 changesets with 18 changes to 8 files
721 checked 4 changesets with 18 changes to 8 files
722 $ cd ..
722 $ cd ..
723
723
724 #if reporevlogstore
724 #if reporevlogstore
725 Create clones using old repo formats to use in later tests
725 Create clones using old repo formats to use in later tests
726 $ hg clone --config format.usestore=False \
726 $ hg clone --config format.usestore=False \
727 > --config experimental.changegroup3=True \
727 > --config experimental.changegroup3=True \
728 > http://localhost:$HGPORT deeprepo-basicstore
728 > http://localhost:$HGPORT deeprepo-basicstore
729 requesting all changes
729 requesting all changes
730 adding changesets
730 adding changesets
731 adding manifests
731 adding manifests
732 adding file changes
732 adding file changes
733 added 4 changesets with 18 changes to 8 files
733 added 4 changesets with 18 changes to 8 files
734 new changesets 775704be6f52:523e5c631710
734 new changesets 775704be6f52:523e5c631710
735 updating to branch default
735 updating to branch default
736 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
736 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
737 $ cd deeprepo-basicstore
737 $ cd deeprepo-basicstore
738 $ grep store .hg/requires
738 $ grep store .hg/requires
739 [1]
739 [1]
740 $ hg serve -p $HGPORT1 -d --pid-file=hg.pid --errorlog=errors.log
740 $ hg serve -p $HGPORT1 -d --pid-file=hg.pid --errorlog=errors.log
741 $ cat hg.pid >> $DAEMON_PIDS
741 $ cat hg.pid >> $DAEMON_PIDS
742 $ cd ..
742 $ cd ..
743 $ hg clone --config format.usefncache=False \
743 $ hg clone --config format.usefncache=False \
744 > --config experimental.changegroup3=True \
744 > --config experimental.changegroup3=True \
745 > http://localhost:$HGPORT deeprepo-encodedstore
745 > http://localhost:$HGPORT deeprepo-encodedstore
746 requesting all changes
746 requesting all changes
747 adding changesets
747 adding changesets
748 adding manifests
748 adding manifests
749 adding file changes
749 adding file changes
750 added 4 changesets with 18 changes to 8 files
750 added 4 changesets with 18 changes to 8 files
751 new changesets 775704be6f52:523e5c631710
751 new changesets 775704be6f52:523e5c631710
752 updating to branch default
752 updating to branch default
753 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
753 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
754 $ cd deeprepo-encodedstore
754 $ cd deeprepo-encodedstore
755 $ grep fncache .hg/requires
755 $ grep fncache .hg/requires
756 [1]
756 [1]
757 $ hg serve -p $HGPORT2 -d --pid-file=hg.pid --errorlog=errors.log
757 $ hg serve -p $HGPORT2 -d --pid-file=hg.pid --errorlog=errors.log
758 $ cat hg.pid >> $DAEMON_PIDS
758 $ cat hg.pid >> $DAEMON_PIDS
759 $ cd ..
759 $ cd ..
760
760
761 Local clone with basicstore
761 Local clone with basicstore
762 $ hg clone -U deeprepo-basicstore local-clone-basicstore
762 $ hg clone -U deeprepo-basicstore local-clone-basicstore
763 $ hg -R local-clone-basicstore verify
763 $ hg -R local-clone-basicstore verify
764 checking changesets
764 checking changesets
765 checking manifests
765 checking manifests
766 checking directory manifests
766 checking directory manifests
767 crosschecking files in changesets and manifests
767 crosschecking files in changesets and manifests
768 checking files
768 checking files
769 checked 4 changesets with 18 changes to 8 files
769 checked 4 changesets with 18 changes to 8 files
770
770
771 Local clone with encodedstore
771 Local clone with encodedstore
772 $ hg clone -U deeprepo-encodedstore local-clone-encodedstore
772 $ hg clone -U deeprepo-encodedstore local-clone-encodedstore
773 $ hg -R local-clone-encodedstore verify
773 $ hg -R local-clone-encodedstore verify
774 checking changesets
774 checking changesets
775 checking manifests
775 checking manifests
776 checking directory manifests
776 checking directory manifests
777 crosschecking files in changesets and manifests
777 crosschecking files in changesets and manifests
778 checking files
778 checking files
779 checked 4 changesets with 18 changes to 8 files
779 checked 4 changesets with 18 changes to 8 files
780
780
781 Local clone with fncachestore
781 Local clone with fncachestore
782 $ hg clone -U deeprepo local-clone-fncachestore
782 $ hg clone -U deeprepo local-clone-fncachestore
783 $ hg -R local-clone-fncachestore verify
783 $ hg -R local-clone-fncachestore verify
784 checking changesets
784 checking changesets
785 checking manifests
785 checking manifests
786 checking directory manifests
786 checking directory manifests
787 crosschecking files in changesets and manifests
787 crosschecking files in changesets and manifests
788 checking files
788 checking files
789 checked 4 changesets with 18 changes to 8 files
789 checked 4 changesets with 18 changes to 8 files
790
790
791 Stream clone with basicstore
791 Stream clone with basicstore
792 $ hg clone --config experimental.changegroup3=True --stream -U \
792 $ hg clone --config experimental.changegroup3=True --stream -U \
793 > http://localhost:$HGPORT1 stream-clone-basicstore
793 > http://localhost:$HGPORT1 stream-clone-basicstore
794 streaming all changes
794 streaming all changes
795 29 files to transfer, * of data (glob)
795 28 files to transfer, * of data (glob)
796 transferred * in * seconds (*) (glob)
796 transferred * in * seconds (*) (glob)
797 $ hg -R stream-clone-basicstore verify
797 $ hg -R stream-clone-basicstore verify
798 checking changesets
798 checking changesets
799 checking manifests
799 checking manifests
800 checking directory manifests
800 checking directory manifests
801 crosschecking files in changesets and manifests
801 crosschecking files in changesets and manifests
802 checking files
802 checking files
803 checked 4 changesets with 18 changes to 8 files
803 checked 4 changesets with 18 changes to 8 files
804
804
805 Stream clone with encodedstore
805 Stream clone with encodedstore
806 $ hg clone --config experimental.changegroup3=True --stream -U \
806 $ hg clone --config experimental.changegroup3=True --stream -U \
807 > http://localhost:$HGPORT2 stream-clone-encodedstore
807 > http://localhost:$HGPORT2 stream-clone-encodedstore
808 streaming all changes
808 streaming all changes
809 29 files to transfer, * of data (glob)
809 28 files to transfer, * of data (glob)
810 transferred * in * seconds (*) (glob)
810 transferred * in * seconds (*) (glob)
811 $ hg -R stream-clone-encodedstore verify
811 $ hg -R stream-clone-encodedstore verify
812 checking changesets
812 checking changesets
813 checking manifests
813 checking manifests
814 checking directory manifests
814 checking directory manifests
815 crosschecking files in changesets and manifests
815 crosschecking files in changesets and manifests
816 checking files
816 checking files
817 checked 4 changesets with 18 changes to 8 files
817 checked 4 changesets with 18 changes to 8 files
818
818
819 Stream clone with fncachestore
819 Stream clone with fncachestore
820 $ hg clone --config experimental.changegroup3=True --stream -U \
820 $ hg clone --config experimental.changegroup3=True --stream -U \
821 > http://localhost:$HGPORT stream-clone-fncachestore
821 > http://localhost:$HGPORT stream-clone-fncachestore
822 streaming all changes
822 streaming all changes
823 22 files to transfer, * of data (glob)
823 22 files to transfer, * of data (glob)
824 transferred * in * seconds (*) (glob)
824 transferred * in * seconds (*) (glob)
825 $ hg -R stream-clone-fncachestore verify
825 $ hg -R stream-clone-fncachestore verify
826 checking changesets
826 checking changesets
827 checking manifests
827 checking manifests
828 checking directory manifests
828 checking directory manifests
829 crosschecking files in changesets and manifests
829 crosschecking files in changesets and manifests
830 checking files
830 checking files
831 checked 4 changesets with 18 changes to 8 files
831 checked 4 changesets with 18 changes to 8 files
832
832
833 Packed bundle
833 Packed bundle
834 $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
834 $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
835 writing 5330 bytes for 18 files (no-zstd !)
835 writing 5330 bytes for 18 files (no-zstd !)
836 writing 5400 bytes for 18 files (zstd !)
836 writing 5400 bytes for 18 files (zstd !)
837 bundle requirements: generaldelta, revlogv1, sparserevlog, treemanifest (no-rust !)
837 bundle requirements: generaldelta, revlogv1, sparserevlog, treemanifest (no-rust !)
838 bundle requirements: generaldelta, persistent-nodemap, revlogv1, sparserevlog, treemanifest (rust !)
838 bundle requirements: generaldelta, persistent-nodemap, revlogv1, sparserevlog, treemanifest (rust !)
839 $ hg debugbundle --spec repo-packed.hg
839 $ hg debugbundle --spec repo-packed.hg
840 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog%2Ctreemanifest (no-rust !)
840 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog%2Ctreemanifest (no-rust !)
841 none-packed1;requirements%3Dgeneraldelta%2Cpersistent-nodemap%2Crevlogv1%2Csparserevlog%2Ctreemanifest (rust !)
841 none-packed1;requirements%3Dgeneraldelta%2Cpersistent-nodemap%2Crevlogv1%2Csparserevlog%2Ctreemanifest (rust !)
842
842
843 #endif
843 #endif
844
844
845 Bundle with changegroup2 is not supported
845 Bundle with changegroup2 is not supported
846
846
847 $ hg -R deeprepo bundle --all -t v2 deeprepo.bundle
847 $ hg -R deeprepo bundle --all -t v2 deeprepo.bundle
848 abort: repository does not support bundle version 02
848 abort: repository does not support bundle version 02
849 [255]
849 [255]
850
850
851 Pull does not include changegroup for manifest the client already has from
851 Pull does not include changegroup for manifest the client already has from
852 other branch
852 other branch
853
853
854 $ mkdir grafted-dir-repo
854 $ mkdir grafted-dir-repo
855 $ cd grafted-dir-repo
855 $ cd grafted-dir-repo
856 $ hg --config experimental.treemanifest=1 init
856 $ hg --config experimental.treemanifest=1 init
857 $ mkdir dir
857 $ mkdir dir
858 $ echo a > dir/file
858 $ echo a > dir/file
859 $ echo a > file
859 $ echo a > file
860 $ hg ci -Am initial
860 $ hg ci -Am initial
861 adding dir/file
861 adding dir/file
862 adding file
862 adding file
863 $ echo b > dir/file
863 $ echo b > dir/file
864 $ hg ci -m updated
864 $ hg ci -m updated
865 $ hg co '.^'
865 $ hg co '.^'
866 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
866 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
867 $ hg revert -r tip dir/
867 $ hg revert -r tip dir/
868 reverting dir/file
868 reverting dir/file
869 $ echo b > file # to make sure root manifest is sent
869 $ echo b > file # to make sure root manifest is sent
870 $ hg ci -m grafted
870 $ hg ci -m grafted
871 created new head
871 created new head
872 $ cd ..
872 $ cd ..
873
873
874 $ hg --config experimental.treemanifest=1 clone --pull -r 1 \
874 $ hg --config experimental.treemanifest=1 clone --pull -r 1 \
875 > grafted-dir-repo grafted-dir-repo-clone
875 > grafted-dir-repo grafted-dir-repo-clone
876 adding changesets
876 adding changesets
877 adding manifests
877 adding manifests
878 adding file changes
878 adding file changes
879 added 2 changesets with 3 changes to 2 files
879 added 2 changesets with 3 changes to 2 files
880 new changesets d84f4c419457:09ab742f3b0f
880 new changesets d84f4c419457:09ab742f3b0f
881 updating to branch default
881 updating to branch default
882 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
882 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
883 $ cd grafted-dir-repo-clone
883 $ cd grafted-dir-repo-clone
884 $ hg pull -r 2
884 $ hg pull -r 2
885 pulling from $TESTTMP/grafted-dir-repo
885 pulling from $TESTTMP/grafted-dir-repo
886 searching for changes
886 searching for changes
887 adding changesets
887 adding changesets
888 adding manifests
888 adding manifests
889 adding file changes
889 adding file changes
890 added 1 changesets with 1 changes to 1 files (+1 heads)
890 added 1 changesets with 1 changes to 1 files (+1 heads)
891 new changesets 73699489fb7c
891 new changesets 73699489fb7c
892 (run 'hg heads' to see heads, 'hg merge' to merge)
892 (run 'hg heads' to see heads, 'hg merge' to merge)
893
893
894 Committing a empty commit does not duplicate root treemanifest
894 Committing a empty commit does not duplicate root treemanifest
895 $ echo z >> z
895 $ echo z >> z
896 $ hg commit -Aqm 'pre-empty commit'
896 $ hg commit -Aqm 'pre-empty commit'
897 $ hg rm z
897 $ hg rm z
898 $ hg commit --amend -m 'empty commit'
898 $ hg commit --amend -m 'empty commit'
899 saved backup bundle to $TESTTMP/grafted-dir-repo-clone/.hg/strip-backup/cb99d5717cea-9e3b6b02-amend.hg
899 saved backup bundle to $TESTTMP/grafted-dir-repo-clone/.hg/strip-backup/cb99d5717cea-9e3b6b02-amend.hg
900 $ hg log -r 'tip + tip^' -T '{manifest}\n'
900 $ hg log -r 'tip + tip^' -T '{manifest}\n'
901 1:678d3574b88c
901 1:678d3574b88c
902 1:678d3574b88c
902 1:678d3574b88c
903 $ hg --config extensions.strip= strip -r . -q
903 $ hg --config extensions.strip= strip -r . -q
General Comments 0
You need to be logged in to leave comments. Login now