##// END OF EJS Templates
localrepo: better error when a repo exists but we lack permissions...
Valentin Gatien-Baron -
r39022:ac0a8716 default
parent child Browse files
Show More
@@ -1,2405 +1,2410 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 branchmap,
26 branchmap,
27 bundle2,
27 bundle2,
28 changegroup,
28 changegroup,
29 changelog,
29 changelog,
30 color,
30 color,
31 context,
31 context,
32 dirstate,
32 dirstate,
33 dirstateguard,
33 dirstateguard,
34 discovery,
34 discovery,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 narrowspec,
47 narrowspec,
48 obsolete,
48 obsolete,
49 pathutil,
49 pathutil,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 pycompat,
52 pycompat,
53 repository,
53 repository,
54 repoview,
54 repoview,
55 revset,
55 revset,
56 revsetlang,
56 revsetlang,
57 scmutil,
57 scmutil,
58 sparse,
58 sparse,
59 store,
59 store,
60 subrepoutil,
60 subrepoutil,
61 tags as tagsmod,
61 tags as tagsmod,
62 transaction,
62 transaction,
63 txnutil,
63 txnutil,
64 util,
64 util,
65 vfs as vfsmod,
65 vfs as vfsmod,
66 )
66 )
67 from .utils import (
67 from .utils import (
68 interfaceutil,
68 interfaceutil,
69 procutil,
69 procutil,
70 stringutil,
70 stringutil,
71 )
71 )
72
72
73 release = lockmod.release
73 release = lockmod.release
74 urlerr = util.urlerr
74 urlerr = util.urlerr
75 urlreq = util.urlreq
75 urlreq = util.urlreq
76
76
77 # set of (path, vfs-location) tuples. vfs-location is:
77 # set of (path, vfs-location) tuples. vfs-location is:
78 # - 'plain for vfs relative paths
78 # - 'plain for vfs relative paths
79 # - '' for svfs relative paths
79 # - '' for svfs relative paths
80 _cachedfiles = set()
80 _cachedfiles = set()
81
81
82 class _basefilecache(scmutil.filecache):
82 class _basefilecache(scmutil.filecache):
83 """All filecache usage on repo are done for logic that should be unfiltered
83 """All filecache usage on repo are done for logic that should be unfiltered
84 """
84 """
85 def __get__(self, repo, type=None):
85 def __get__(self, repo, type=None):
86 if repo is None:
86 if repo is None:
87 return self
87 return self
88 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
88 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
89 def __set__(self, repo, value):
89 def __set__(self, repo, value):
90 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
90 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
91 def __delete__(self, repo):
91 def __delete__(self, repo):
92 return super(_basefilecache, self).__delete__(repo.unfiltered())
92 return super(_basefilecache, self).__delete__(repo.unfiltered())
93
93
94 class repofilecache(_basefilecache):
94 class repofilecache(_basefilecache):
95 """filecache for files in .hg but outside of .hg/store"""
95 """filecache for files in .hg but outside of .hg/store"""
96 def __init__(self, *paths):
96 def __init__(self, *paths):
97 super(repofilecache, self).__init__(*paths)
97 super(repofilecache, self).__init__(*paths)
98 for path in paths:
98 for path in paths:
99 _cachedfiles.add((path, 'plain'))
99 _cachedfiles.add((path, 'plain'))
100
100
101 def join(self, obj, fname):
101 def join(self, obj, fname):
102 return obj.vfs.join(fname)
102 return obj.vfs.join(fname)
103
103
104 class storecache(_basefilecache):
104 class storecache(_basefilecache):
105 """filecache for files in the store"""
105 """filecache for files in the store"""
106 def __init__(self, *paths):
106 def __init__(self, *paths):
107 super(storecache, self).__init__(*paths)
107 super(storecache, self).__init__(*paths)
108 for path in paths:
108 for path in paths:
109 _cachedfiles.add((path, ''))
109 _cachedfiles.add((path, ''))
110
110
111 def join(self, obj, fname):
111 def join(self, obj, fname):
112 return obj.sjoin(fname)
112 return obj.sjoin(fname)
113
113
114 def isfilecached(repo, name):
114 def isfilecached(repo, name):
115 """check if a repo has already cached "name" filecache-ed property
115 """check if a repo has already cached "name" filecache-ed property
116
116
117 This returns (cachedobj-or-None, iscached) tuple.
117 This returns (cachedobj-or-None, iscached) tuple.
118 """
118 """
119 cacheentry = repo.unfiltered()._filecache.get(name, None)
119 cacheentry = repo.unfiltered()._filecache.get(name, None)
120 if not cacheentry:
120 if not cacheentry:
121 return None, False
121 return None, False
122 return cacheentry.obj, True
122 return cacheentry.obj, True
123
123
124 class unfilteredpropertycache(util.propertycache):
124 class unfilteredpropertycache(util.propertycache):
125 """propertycache that apply to unfiltered repo only"""
125 """propertycache that apply to unfiltered repo only"""
126
126
127 def __get__(self, repo, type=None):
127 def __get__(self, repo, type=None):
128 unfi = repo.unfiltered()
128 unfi = repo.unfiltered()
129 if unfi is repo:
129 if unfi is repo:
130 return super(unfilteredpropertycache, self).__get__(unfi)
130 return super(unfilteredpropertycache, self).__get__(unfi)
131 return getattr(unfi, self.name)
131 return getattr(unfi, self.name)
132
132
133 class filteredpropertycache(util.propertycache):
133 class filteredpropertycache(util.propertycache):
134 """propertycache that must take filtering in account"""
134 """propertycache that must take filtering in account"""
135
135
136 def cachevalue(self, obj, value):
136 def cachevalue(self, obj, value):
137 object.__setattr__(obj, self.name, value)
137 object.__setattr__(obj, self.name, value)
138
138
139
139
140 def hasunfilteredcache(repo, name):
140 def hasunfilteredcache(repo, name):
141 """check if a repo has an unfilteredpropertycache value for <name>"""
141 """check if a repo has an unfilteredpropertycache value for <name>"""
142 return name in vars(repo.unfiltered())
142 return name in vars(repo.unfiltered())
143
143
144 def unfilteredmethod(orig):
144 def unfilteredmethod(orig):
145 """decorate method that always need to be run on unfiltered version"""
145 """decorate method that always need to be run on unfiltered version"""
146 def wrapper(repo, *args, **kwargs):
146 def wrapper(repo, *args, **kwargs):
147 return orig(repo.unfiltered(), *args, **kwargs)
147 return orig(repo.unfiltered(), *args, **kwargs)
148 return wrapper
148 return wrapper
149
149
150 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
150 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
151 'unbundle'}
151 'unbundle'}
152 legacycaps = moderncaps.union({'changegroupsubset'})
152 legacycaps = moderncaps.union({'changegroupsubset'})
153
153
154 @interfaceutil.implementer(repository.ipeercommandexecutor)
154 @interfaceutil.implementer(repository.ipeercommandexecutor)
155 class localcommandexecutor(object):
155 class localcommandexecutor(object):
156 def __init__(self, peer):
156 def __init__(self, peer):
157 self._peer = peer
157 self._peer = peer
158 self._sent = False
158 self._sent = False
159 self._closed = False
159 self._closed = False
160
160
161 def __enter__(self):
161 def __enter__(self):
162 return self
162 return self
163
163
164 def __exit__(self, exctype, excvalue, exctb):
164 def __exit__(self, exctype, excvalue, exctb):
165 self.close()
165 self.close()
166
166
167 def callcommand(self, command, args):
167 def callcommand(self, command, args):
168 if self._sent:
168 if self._sent:
169 raise error.ProgrammingError('callcommand() cannot be used after '
169 raise error.ProgrammingError('callcommand() cannot be used after '
170 'sendcommands()')
170 'sendcommands()')
171
171
172 if self._closed:
172 if self._closed:
173 raise error.ProgrammingError('callcommand() cannot be used after '
173 raise error.ProgrammingError('callcommand() cannot be used after '
174 'close()')
174 'close()')
175
175
176 # We don't need to support anything fancy. Just call the named
176 # We don't need to support anything fancy. Just call the named
177 # method on the peer and return a resolved future.
177 # method on the peer and return a resolved future.
178 fn = getattr(self._peer, pycompat.sysstr(command))
178 fn = getattr(self._peer, pycompat.sysstr(command))
179
179
180 f = pycompat.futures.Future()
180 f = pycompat.futures.Future()
181
181
182 try:
182 try:
183 result = fn(**pycompat.strkwargs(args))
183 result = fn(**pycompat.strkwargs(args))
184 except Exception:
184 except Exception:
185 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
185 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
186 else:
186 else:
187 f.set_result(result)
187 f.set_result(result)
188
188
189 return f
189 return f
190
190
191 def sendcommands(self):
191 def sendcommands(self):
192 self._sent = True
192 self._sent = True
193
193
194 def close(self):
194 def close(self):
195 self._closed = True
195 self._closed = True
196
196
197 @interfaceutil.implementer(repository.ipeercommands)
197 @interfaceutil.implementer(repository.ipeercommands)
198 class localpeer(repository.peer):
198 class localpeer(repository.peer):
199 '''peer for a local repo; reflects only the most recent API'''
199 '''peer for a local repo; reflects only the most recent API'''
200
200
201 def __init__(self, repo, caps=None):
201 def __init__(self, repo, caps=None):
202 super(localpeer, self).__init__()
202 super(localpeer, self).__init__()
203
203
204 if caps is None:
204 if caps is None:
205 caps = moderncaps.copy()
205 caps = moderncaps.copy()
206 self._repo = repo.filtered('served')
206 self._repo = repo.filtered('served')
207 self.ui = repo.ui
207 self.ui = repo.ui
208 self._caps = repo._restrictcapabilities(caps)
208 self._caps = repo._restrictcapabilities(caps)
209
209
210 # Begin of _basepeer interface.
210 # Begin of _basepeer interface.
211
211
212 def url(self):
212 def url(self):
213 return self._repo.url()
213 return self._repo.url()
214
214
215 def local(self):
215 def local(self):
216 return self._repo
216 return self._repo
217
217
218 def peer(self):
218 def peer(self):
219 return self
219 return self
220
220
221 def canpush(self):
221 def canpush(self):
222 return True
222 return True
223
223
224 def close(self):
224 def close(self):
225 self._repo.close()
225 self._repo.close()
226
226
227 # End of _basepeer interface.
227 # End of _basepeer interface.
228
228
229 # Begin of _basewirecommands interface.
229 # Begin of _basewirecommands interface.
230
230
231 def branchmap(self):
231 def branchmap(self):
232 return self._repo.branchmap()
232 return self._repo.branchmap()
233
233
234 def capabilities(self):
234 def capabilities(self):
235 return self._caps
235 return self._caps
236
236
237 def clonebundles(self):
237 def clonebundles(self):
238 return self._repo.tryread('clonebundles.manifest')
238 return self._repo.tryread('clonebundles.manifest')
239
239
240 def debugwireargs(self, one, two, three=None, four=None, five=None):
240 def debugwireargs(self, one, two, three=None, four=None, five=None):
241 """Used to test argument passing over the wire"""
241 """Used to test argument passing over the wire"""
242 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
242 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
243 pycompat.bytestr(four),
243 pycompat.bytestr(four),
244 pycompat.bytestr(five))
244 pycompat.bytestr(five))
245
245
246 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
246 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
247 **kwargs):
247 **kwargs):
248 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
248 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
249 common=common, bundlecaps=bundlecaps,
249 common=common, bundlecaps=bundlecaps,
250 **kwargs)[1]
250 **kwargs)[1]
251 cb = util.chunkbuffer(chunks)
251 cb = util.chunkbuffer(chunks)
252
252
253 if exchange.bundle2requested(bundlecaps):
253 if exchange.bundle2requested(bundlecaps):
254 # When requesting a bundle2, getbundle returns a stream to make the
254 # When requesting a bundle2, getbundle returns a stream to make the
255 # wire level function happier. We need to build a proper object
255 # wire level function happier. We need to build a proper object
256 # from it in local peer.
256 # from it in local peer.
257 return bundle2.getunbundler(self.ui, cb)
257 return bundle2.getunbundler(self.ui, cb)
258 else:
258 else:
259 return changegroup.getunbundler('01', cb, None)
259 return changegroup.getunbundler('01', cb, None)
260
260
261 def heads(self):
261 def heads(self):
262 return self._repo.heads()
262 return self._repo.heads()
263
263
264 def known(self, nodes):
264 def known(self, nodes):
265 return self._repo.known(nodes)
265 return self._repo.known(nodes)
266
266
267 def listkeys(self, namespace):
267 def listkeys(self, namespace):
268 return self._repo.listkeys(namespace)
268 return self._repo.listkeys(namespace)
269
269
270 def lookup(self, key):
270 def lookup(self, key):
271 return self._repo.lookup(key)
271 return self._repo.lookup(key)
272
272
273 def pushkey(self, namespace, key, old, new):
273 def pushkey(self, namespace, key, old, new):
274 return self._repo.pushkey(namespace, key, old, new)
274 return self._repo.pushkey(namespace, key, old, new)
275
275
276 def stream_out(self):
276 def stream_out(self):
277 raise error.Abort(_('cannot perform stream clone against local '
277 raise error.Abort(_('cannot perform stream clone against local '
278 'peer'))
278 'peer'))
279
279
280 def unbundle(self, bundle, heads, url):
280 def unbundle(self, bundle, heads, url):
281 """apply a bundle on a repo
281 """apply a bundle on a repo
282
282
283 This function handles the repo locking itself."""
283 This function handles the repo locking itself."""
284 try:
284 try:
285 try:
285 try:
286 bundle = exchange.readbundle(self.ui, bundle, None)
286 bundle = exchange.readbundle(self.ui, bundle, None)
287 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
287 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
288 if util.safehasattr(ret, 'getchunks'):
288 if util.safehasattr(ret, 'getchunks'):
289 # This is a bundle20 object, turn it into an unbundler.
289 # This is a bundle20 object, turn it into an unbundler.
290 # This little dance should be dropped eventually when the
290 # This little dance should be dropped eventually when the
291 # API is finally improved.
291 # API is finally improved.
292 stream = util.chunkbuffer(ret.getchunks())
292 stream = util.chunkbuffer(ret.getchunks())
293 ret = bundle2.getunbundler(self.ui, stream)
293 ret = bundle2.getunbundler(self.ui, stream)
294 return ret
294 return ret
295 except Exception as exc:
295 except Exception as exc:
296 # If the exception contains output salvaged from a bundle2
296 # If the exception contains output salvaged from a bundle2
297 # reply, we need to make sure it is printed before continuing
297 # reply, we need to make sure it is printed before continuing
298 # to fail. So we build a bundle2 with such output and consume
298 # to fail. So we build a bundle2 with such output and consume
299 # it directly.
299 # it directly.
300 #
300 #
301 # This is not very elegant but allows a "simple" solution for
301 # This is not very elegant but allows a "simple" solution for
302 # issue4594
302 # issue4594
303 output = getattr(exc, '_bundle2salvagedoutput', ())
303 output = getattr(exc, '_bundle2salvagedoutput', ())
304 if output:
304 if output:
305 bundler = bundle2.bundle20(self._repo.ui)
305 bundler = bundle2.bundle20(self._repo.ui)
306 for out in output:
306 for out in output:
307 bundler.addpart(out)
307 bundler.addpart(out)
308 stream = util.chunkbuffer(bundler.getchunks())
308 stream = util.chunkbuffer(bundler.getchunks())
309 b = bundle2.getunbundler(self.ui, stream)
309 b = bundle2.getunbundler(self.ui, stream)
310 bundle2.processbundle(self._repo, b)
310 bundle2.processbundle(self._repo, b)
311 raise
311 raise
312 except error.PushRaced as exc:
312 except error.PushRaced as exc:
313 raise error.ResponseError(_('push failed:'),
313 raise error.ResponseError(_('push failed:'),
314 stringutil.forcebytestr(exc))
314 stringutil.forcebytestr(exc))
315
315
316 # End of _basewirecommands interface.
316 # End of _basewirecommands interface.
317
317
318 # Begin of peer interface.
318 # Begin of peer interface.
319
319
320 def commandexecutor(self):
320 def commandexecutor(self):
321 return localcommandexecutor(self)
321 return localcommandexecutor(self)
322
322
323 # End of peer interface.
323 # End of peer interface.
324
324
325 @interfaceutil.implementer(repository.ipeerlegacycommands)
325 @interfaceutil.implementer(repository.ipeerlegacycommands)
326 class locallegacypeer(localpeer):
326 class locallegacypeer(localpeer):
327 '''peer extension which implements legacy methods too; used for tests with
327 '''peer extension which implements legacy methods too; used for tests with
328 restricted capabilities'''
328 restricted capabilities'''
329
329
330 def __init__(self, repo):
330 def __init__(self, repo):
331 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
331 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
332
332
333 # Begin of baselegacywirecommands interface.
333 # Begin of baselegacywirecommands interface.
334
334
335 def between(self, pairs):
335 def between(self, pairs):
336 return self._repo.between(pairs)
336 return self._repo.between(pairs)
337
337
338 def branches(self, nodes):
338 def branches(self, nodes):
339 return self._repo.branches(nodes)
339 return self._repo.branches(nodes)
340
340
341 def changegroup(self, nodes, source):
341 def changegroup(self, nodes, source):
342 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
342 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
343 missingheads=self._repo.heads())
343 missingheads=self._repo.heads())
344 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
344 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
345
345
346 def changegroupsubset(self, bases, heads, source):
346 def changegroupsubset(self, bases, heads, source):
347 outgoing = discovery.outgoing(self._repo, missingroots=bases,
347 outgoing = discovery.outgoing(self._repo, missingroots=bases,
348 missingheads=heads)
348 missingheads=heads)
349 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
350
350
351 # End of baselegacywirecommands interface.
351 # End of baselegacywirecommands interface.
352
352
353 # Increment the sub-version when the revlog v2 format changes to lock out old
353 # Increment the sub-version when the revlog v2 format changes to lock out old
354 # clients.
354 # clients.
355 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
355 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
356
356
357 # A repository with the sparserevlog feature will have delta chains that
357 # A repository with the sparserevlog feature will have delta chains that
358 # can spread over a larger span. Sparse reading cuts these large spans into
358 # can spread over a larger span. Sparse reading cuts these large spans into
359 # pieces, so that each piece isn't too big.
359 # pieces, so that each piece isn't too big.
360 # Without the sparserevlog capability, reading from the repository could use
360 # Without the sparserevlog capability, reading from the repository could use
361 # huge amounts of memory, because the whole span would be read at once,
361 # huge amounts of memory, because the whole span would be read at once,
362 # including all the intermediate revisions that aren't pertinent for the chain.
362 # including all the intermediate revisions that aren't pertinent for the chain.
363 # This is why once a repository has enabled sparse-read, it becomes required.
363 # This is why once a repository has enabled sparse-read, it becomes required.
364 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
364 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
365
365
366 # Functions receiving (ui, features) that extensions can register to impact
366 # Functions receiving (ui, features) that extensions can register to impact
367 # the ability to load repositories with custom requirements. Only
367 # the ability to load repositories with custom requirements. Only
368 # functions defined in loaded extensions are called.
368 # functions defined in loaded extensions are called.
369 #
369 #
370 # The function receives a set of requirement strings that the repository
370 # The function receives a set of requirement strings that the repository
371 # is capable of opening. Functions will typically add elements to the
371 # is capable of opening. Functions will typically add elements to the
372 # set to reflect that the extension knows how to handle that requirements.
372 # set to reflect that the extension knows how to handle that requirements.
373 featuresetupfuncs = set()
373 featuresetupfuncs = set()
374
374
375 @interfaceutil.implementer(repository.completelocalrepository)
375 @interfaceutil.implementer(repository.completelocalrepository)
376 class localrepository(object):
376 class localrepository(object):
377
377
378 # obsolete experimental requirements:
378 # obsolete experimental requirements:
379 # - manifestv2: An experimental new manifest format that allowed
379 # - manifestv2: An experimental new manifest format that allowed
380 # for stem compression of long paths. Experiment ended up not
380 # for stem compression of long paths. Experiment ended up not
381 # being successful (repository sizes went up due to worse delta
381 # being successful (repository sizes went up due to worse delta
382 # chains), and the code was deleted in 4.6.
382 # chains), and the code was deleted in 4.6.
383 supportedformats = {
383 supportedformats = {
384 'revlogv1',
384 'revlogv1',
385 'generaldelta',
385 'generaldelta',
386 'treemanifest',
386 'treemanifest',
387 REVLOGV2_REQUIREMENT,
387 REVLOGV2_REQUIREMENT,
388 SPARSEREVLOG_REQUIREMENT,
388 SPARSEREVLOG_REQUIREMENT,
389 }
389 }
390 _basesupported = supportedformats | {
390 _basesupported = supportedformats | {
391 'store',
391 'store',
392 'fncache',
392 'fncache',
393 'shared',
393 'shared',
394 'relshared',
394 'relshared',
395 'dotencode',
395 'dotencode',
396 'exp-sparse',
396 'exp-sparse',
397 }
397 }
398 openerreqs = {
398 openerreqs = {
399 'revlogv1',
399 'revlogv1',
400 'generaldelta',
400 'generaldelta',
401 'treemanifest',
401 'treemanifest',
402 }
402 }
403
403
404 # list of prefix for file which can be written without 'wlock'
404 # list of prefix for file which can be written without 'wlock'
405 # Extensions should extend this list when needed
405 # Extensions should extend this list when needed
406 _wlockfreeprefix = {
406 _wlockfreeprefix = {
407 # We migh consider requiring 'wlock' for the next
407 # We migh consider requiring 'wlock' for the next
408 # two, but pretty much all the existing code assume
408 # two, but pretty much all the existing code assume
409 # wlock is not needed so we keep them excluded for
409 # wlock is not needed so we keep them excluded for
410 # now.
410 # now.
411 'hgrc',
411 'hgrc',
412 'requires',
412 'requires',
413 # XXX cache is a complicatged business someone
413 # XXX cache is a complicatged business someone
414 # should investigate this in depth at some point
414 # should investigate this in depth at some point
415 'cache/',
415 'cache/',
416 # XXX shouldn't be dirstate covered by the wlock?
416 # XXX shouldn't be dirstate covered by the wlock?
417 'dirstate',
417 'dirstate',
418 # XXX bisect was still a bit too messy at the time
418 # XXX bisect was still a bit too messy at the time
419 # this changeset was introduced. Someone should fix
419 # this changeset was introduced. Someone should fix
420 # the remainig bit and drop this line
420 # the remainig bit and drop this line
421 'bisect.state',
421 'bisect.state',
422 }
422 }
423
423
424 def __init__(self, baseui, path, create=False, intents=None):
424 def __init__(self, baseui, path, create=False, intents=None):
425 self.requirements = set()
425 self.requirements = set()
426 self.filtername = None
426 self.filtername = None
427 # wvfs: rooted at the repository root, used to access the working copy
427 # wvfs: rooted at the repository root, used to access the working copy
428 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
428 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
429 # vfs: rooted at .hg, used to access repo files outside of .hg/store
429 # vfs: rooted at .hg, used to access repo files outside of .hg/store
430 self.vfs = None
430 self.vfs = None
431 # svfs: usually rooted at .hg/store, used to access repository history
431 # svfs: usually rooted at .hg/store, used to access repository history
432 # If this is a shared repository, this vfs may point to another
432 # If this is a shared repository, this vfs may point to another
433 # repository's .hg/store directory.
433 # repository's .hg/store directory.
434 self.svfs = None
434 self.svfs = None
435 self.root = self.wvfs.base
435 self.root = self.wvfs.base
436 self.path = self.wvfs.join(".hg")
436 self.path = self.wvfs.join(".hg")
437 self.origroot = path
437 self.origroot = path
438 # This is only used by context.workingctx.match in order to
438 # This is only used by context.workingctx.match in order to
439 # detect files in subrepos.
439 # detect files in subrepos.
440 self.auditor = pathutil.pathauditor(
440 self.auditor = pathutil.pathauditor(
441 self.root, callback=self._checknested)
441 self.root, callback=self._checknested)
442 # This is only used by context.basectx.match in order to detect
442 # This is only used by context.basectx.match in order to detect
443 # files in subrepos.
443 # files in subrepos.
444 self.nofsauditor = pathutil.pathauditor(
444 self.nofsauditor = pathutil.pathauditor(
445 self.root, callback=self._checknested, realfs=False, cached=True)
445 self.root, callback=self._checknested, realfs=False, cached=True)
446 self.baseui = baseui
446 self.baseui = baseui
447 self.ui = baseui.copy()
447 self.ui = baseui.copy()
448 self.ui.copy = baseui.copy # prevent copying repo configuration
448 self.ui.copy = baseui.copy # prevent copying repo configuration
449 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
449 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
450 if (self.ui.configbool('devel', 'all-warnings') or
450 if (self.ui.configbool('devel', 'all-warnings') or
451 self.ui.configbool('devel', 'check-locks')):
451 self.ui.configbool('devel', 'check-locks')):
452 self.vfs.audit = self._getvfsward(self.vfs.audit)
452 self.vfs.audit = self._getvfsward(self.vfs.audit)
453 # A list of callback to shape the phase if no data were found.
453 # A list of callback to shape the phase if no data were found.
454 # Callback are in the form: func(repo, roots) --> processed root.
454 # Callback are in the form: func(repo, roots) --> processed root.
455 # This list it to be filled by extension during repo setup
455 # This list it to be filled by extension during repo setup
456 self._phasedefaults = []
456 self._phasedefaults = []
457 try:
457 try:
458 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
458 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
459 self._loadextensions()
459 self._loadextensions()
460 except IOError:
460 except IOError:
461 pass
461 pass
462
462
463 if featuresetupfuncs:
463 if featuresetupfuncs:
464 self.supported = set(self._basesupported) # use private copy
464 self.supported = set(self._basesupported) # use private copy
465 extmods = set(m.__name__ for n, m
465 extmods = set(m.__name__ for n, m
466 in extensions.extensions(self.ui))
466 in extensions.extensions(self.ui))
467 for setupfunc in featuresetupfuncs:
467 for setupfunc in featuresetupfuncs:
468 if setupfunc.__module__ in extmods:
468 if setupfunc.__module__ in extmods:
469 setupfunc(self.ui, self.supported)
469 setupfunc(self.ui, self.supported)
470 else:
470 else:
471 self.supported = self._basesupported
471 self.supported = self._basesupported
472 color.setup(self.ui)
472 color.setup(self.ui)
473
473
474 # Add compression engines.
474 # Add compression engines.
475 for name in util.compengines:
475 for name in util.compengines:
476 engine = util.compengines[name]
476 engine = util.compengines[name]
477 if engine.revlogheader():
477 if engine.revlogheader():
478 self.supported.add('exp-compression-%s' % name)
478 self.supported.add('exp-compression-%s' % name)
479
479
480 if not self.vfs.isdir():
480 if not self.vfs.isdir():
481 if create:
481 if create:
482 self.requirements = newreporequirements(self)
482 self.requirements = newreporequirements(self)
483
483
484 if not self.wvfs.exists():
484 if not self.wvfs.exists():
485 self.wvfs.makedirs()
485 self.wvfs.makedirs()
486 self.vfs.makedir(notindexed=True)
486 self.vfs.makedir(notindexed=True)
487
487
488 if 'store' in self.requirements:
488 if 'store' in self.requirements:
489 self.vfs.mkdir("store")
489 self.vfs.mkdir("store")
490
490
491 # create an invalid changelog
491 # create an invalid changelog
492 self.vfs.append(
492 self.vfs.append(
493 "00changelog.i",
493 "00changelog.i",
494 '\0\0\0\2' # represents revlogv2
494 '\0\0\0\2' # represents revlogv2
495 ' dummy changelog to prevent using the old repo layout'
495 ' dummy changelog to prevent using the old repo layout'
496 )
496 )
497 else:
497 else:
498 try:
499 self.vfs.stat()
500 except OSError as inst:
501 if inst.errno != errno.ENOENT:
502 raise
498 raise error.RepoError(_("repository %s not found") % path)
503 raise error.RepoError(_("repository %s not found") % path)
499 elif create:
504 elif create:
500 raise error.RepoError(_("repository %s already exists") % path)
505 raise error.RepoError(_("repository %s already exists") % path)
501 else:
506 else:
502 try:
507 try:
503 self.requirements = scmutil.readrequires(
508 self.requirements = scmutil.readrequires(
504 self.vfs, self.supported)
509 self.vfs, self.supported)
505 except IOError as inst:
510 except IOError as inst:
506 if inst.errno != errno.ENOENT:
511 if inst.errno != errno.ENOENT:
507 raise
512 raise
508
513
509 cachepath = self.vfs.join('cache')
514 cachepath = self.vfs.join('cache')
510 self.sharedpath = self.path
515 self.sharedpath = self.path
511 try:
516 try:
512 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
517 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
513 if 'relshared' in self.requirements:
518 if 'relshared' in self.requirements:
514 sharedpath = self.vfs.join(sharedpath)
519 sharedpath = self.vfs.join(sharedpath)
515 vfs = vfsmod.vfs(sharedpath, realpath=True)
520 vfs = vfsmod.vfs(sharedpath, realpath=True)
516 cachepath = vfs.join('cache')
521 cachepath = vfs.join('cache')
517 s = vfs.base
522 s = vfs.base
518 if not vfs.exists():
523 if not vfs.exists():
519 raise error.RepoError(
524 raise error.RepoError(
520 _('.hg/sharedpath points to nonexistent directory %s') % s)
525 _('.hg/sharedpath points to nonexistent directory %s') % s)
521 self.sharedpath = s
526 self.sharedpath = s
522 except IOError as inst:
527 except IOError as inst:
523 if inst.errno != errno.ENOENT:
528 if inst.errno != errno.ENOENT:
524 raise
529 raise
525
530
526 if 'exp-sparse' in self.requirements and not sparse.enabled:
531 if 'exp-sparse' in self.requirements and not sparse.enabled:
527 raise error.RepoError(_('repository is using sparse feature but '
532 raise error.RepoError(_('repository is using sparse feature but '
528 'sparse is not enabled; enable the '
533 'sparse is not enabled; enable the '
529 '"sparse" extensions to access'))
534 '"sparse" extensions to access'))
530
535
531 self.store = store.store(
536 self.store = store.store(
532 self.requirements, self.sharedpath,
537 self.requirements, self.sharedpath,
533 lambda base: vfsmod.vfs(base, cacheaudited=True))
538 lambda base: vfsmod.vfs(base, cacheaudited=True))
534 self.spath = self.store.path
539 self.spath = self.store.path
535 self.svfs = self.store.vfs
540 self.svfs = self.store.vfs
536 self.sjoin = self.store.join
541 self.sjoin = self.store.join
537 self.vfs.createmode = self.store.createmode
542 self.vfs.createmode = self.store.createmode
538 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
543 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
539 self.cachevfs.createmode = self.store.createmode
544 self.cachevfs.createmode = self.store.createmode
540 if (self.ui.configbool('devel', 'all-warnings') or
545 if (self.ui.configbool('devel', 'all-warnings') or
541 self.ui.configbool('devel', 'check-locks')):
546 self.ui.configbool('devel', 'check-locks')):
542 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
547 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
543 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
548 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
544 else: # standard vfs
549 else: # standard vfs
545 self.svfs.audit = self._getsvfsward(self.svfs.audit)
550 self.svfs.audit = self._getsvfsward(self.svfs.audit)
546 self._applyopenerreqs()
551 self._applyopenerreqs()
547 if create:
552 if create:
548 self._writerequirements()
553 self._writerequirements()
549
554
550 self._dirstatevalidatewarned = False
555 self._dirstatevalidatewarned = False
551
556
552 self._branchcaches = {}
557 self._branchcaches = {}
553 self._revbranchcache = None
558 self._revbranchcache = None
554 self._filterpats = {}
559 self._filterpats = {}
555 self._datafilters = {}
560 self._datafilters = {}
556 self._transref = self._lockref = self._wlockref = None
561 self._transref = self._lockref = self._wlockref = None
557
562
558 # A cache for various files under .hg/ that tracks file changes,
563 # A cache for various files under .hg/ that tracks file changes,
559 # (used by the filecache decorator)
564 # (used by the filecache decorator)
560 #
565 #
561 # Maps a property name to its util.filecacheentry
566 # Maps a property name to its util.filecacheentry
562 self._filecache = {}
567 self._filecache = {}
563
568
564 # hold sets of revision to be filtered
569 # hold sets of revision to be filtered
565 # should be cleared when something might have changed the filter value:
570 # should be cleared when something might have changed the filter value:
566 # - new changesets,
571 # - new changesets,
567 # - phase change,
572 # - phase change,
568 # - new obsolescence marker,
573 # - new obsolescence marker,
569 # - working directory parent change,
574 # - working directory parent change,
570 # - bookmark changes
575 # - bookmark changes
571 self.filteredrevcache = {}
576 self.filteredrevcache = {}
572
577
573 # post-dirstate-status hooks
578 # post-dirstate-status hooks
574 self._postdsstatus = []
579 self._postdsstatus = []
575
580
576 # generic mapping between names and nodes
581 # generic mapping between names and nodes
577 self.names = namespaces.namespaces()
582 self.names = namespaces.namespaces()
578
583
579 # Key to signature value.
584 # Key to signature value.
580 self._sparsesignaturecache = {}
585 self._sparsesignaturecache = {}
581 # Signature to cached matcher instance.
586 # Signature to cached matcher instance.
582 self._sparsematchercache = {}
587 self._sparsematchercache = {}
583
588
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs

        Wraps the vfs audit callable ``origfunc`` so that, under the
        'devel.check-locks' config, writes through self.vfs without the
        appropriate lock emit a devel warning.  Returns the wrapping
        callable.
        """
        # weakref so the ward does not keep the repository alive
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            # repo may be gone, or too early in __init__ to have lock refs
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            # read-only accesses never need a lock
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs
618
623
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs

        Like _getvfsward, but for the store vfs: any non-read access to a
        store file while the repository lock is not held triggers a devel
        warning.  Returns the wrapping callable.
        """
        # weakref so the ward does not keep the repository alive
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            # read-only accesses never need the lock
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs
637
642
    def close(self):
        # flush in-memory caches (currently the rev branch cache) to disk
        self._writecaches()
640
645
    def _loadextensions(self):
        # load every extension enabled in this repo's configuration
        extensions.loadall(self.ui)
643
648
    def _writecaches(self):
        # the rev branch cache is only written if it was ever instantiated
        if self._revbranchcache:
            self._revbranchcache.write()
647
652
648 def _restrictcapabilities(self, caps):
653 def _restrictcapabilities(self, caps):
649 if self.ui.configbool('experimental', 'bundle2-advertise'):
654 if self.ui.configbool('experimental', 'bundle2-advertise'):
650 caps = set(caps)
655 caps = set(caps)
651 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
656 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
652 role='client'))
657 role='client'))
653 caps.add('bundle2=' + urlreq.quote(capsblob))
658 caps.add('bundle2=' + urlreq.quote(capsblob))
654 return caps
659 return caps
655
660
    def _applyopenerreqs(self):
        """Propagate requirements and config values into self.svfs.options.

        The store vfs options dict is consumed by the revlog layer; this
        rebuilds it from the repo requirements and various (mostly
        experimental) config knobs.
        """
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        deltabothparents = self.ui.configbool('storage',
            'revlog.optimize-delta-parent-choice')
        self.svfs.options['deltabothparents'] = deltabothparents
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        # note: configbytes returns a negative value when unset here,
        # so only a non-negative configured span is applied
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
        sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
        self.svfs.options['sparse-revlog'] = sparserevlog
        if sparserevlog:
            # sparse revlogs imply general delta storage
            self.svfs.options['generaldelta'] = True

        # pick the compression engine recorded in the requirements, if any
        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True
702
707
    def _writerequirements(self):
        # persist self.requirements into the .hg/requires file
        scmutil.writerequires(self.vfs, self.requirements)
705
710
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        ``path`` is an absolute filesystem path.  Returns True only when
        the path lies under this repo's root and every level of nesting
        down to it is a registered subrepository of the working copy.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        # walk prefixes from longest to shortest, looking for a subrepo
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # the path is itself a subrepo of this repo
                    return True
                else:
                    # the path is inside a subrepo; delegate the check to it
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
743
748
    def peer(self):
        # build a fresh peer each call; caching would create a cycle
        return localpeer(self) # not cached to avoid reference cycle
746
751
747 def unfiltered(self):
752 def unfiltered(self):
748 """Return unfiltered version of the repository
753 """Return unfiltered version of the repository
749
754
750 Intended to be overwritten by filtered repo."""
755 Intended to be overwritten by filtered repo."""
751 return self
756 return self
752
757
753 def filtered(self, name, visibilityexceptions=None):
758 def filtered(self, name, visibilityexceptions=None):
754 """Return a filtered version of a repository"""
759 """Return a filtered version of a repository"""
755 cls = repoview.newtype(self.unfiltered().__class__)
760 cls = repoview.newtype(self.unfiltered().__class__)
756 return cls(self, name, visibilityexceptions)
761 return cls(self, name, visibilityexceptions)
757
762
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # in-memory bookmark store; the filecache decorator invalidates it
        # when either backing file changes
        return bookmarks.bmstore(self)
761
766
    @property
    def _activebookmark(self):
        # the currently active bookmark, as tracked by the bookmark store
        return self._bookmarks.active
765
770
    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        # phase information cache, rebuilt when phaseroots or the
        # changelog index changes
        return phases.phasecache(self, self._phasedefaults)
772
777
    @storecache('obsstore')
    def obsstore(self):
        # obsolescence marker store for this repository
        return obsolete.makestore(self.ui, self)
776
781
    @storecache('00changelog.i')
    def changelog(self):
        # trypending lets a transaction's pending changelog be read when
        # applicable (e.g. from hooks) -- see txnutil.mayhavepending
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))
781
786
    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)
787
792
    @storecache('00manifest.i')
    def manifestlog(self):
        # manifest access layer, invalidated when the manifest index changes
        return manifest.manifestlog(self.svfs, self)
791
796
    @repofilecache('dirstate')
    def dirstate(self):
        # working directory state, built via the _makedirstate extension
        # point so extensions can wrap it
        return self._makedirstate()
795
800
796 def _makedirstate(self):
801 def _makedirstate(self):
797 """Extension point for wrapping the dirstate per-repo."""
802 """Extension point for wrapping the dirstate per-repo."""
798 sparsematchfn = lambda: sparse.matcher(self)
803 sparsematchfn = lambda: sparse.matcher(self)
799
804
800 return dirstate.dirstate(self.vfs, self.ui, self.root,
805 return dirstate.dirstate(self.vfs, self.ui, self.root,
801 self._dirstatevalidate, sparsematchfn)
806 self._dirstatevalidate, sparsematchfn)
802
807
    def _dirstatevalidate(self, node):
        """Validate a dirstate parent node against the changelog.

        Returns ``node`` unchanged if it is a known changeset; otherwise
        warns (once per repo instance) and returns nullid.
        """
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                # only emit the warning the first time
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
813
818
    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            # in a share, the narrowspec lives in the source repository
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)
825
830
    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        # a non-narrow repository tracks everything
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)
832
837
    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        # matcher restricting this repo to its narrowspec (an always()
        # matcher when the repo is not narrow)
        return self._narrowmatch
836
841
837 def setnarrowpats(self, newincludes, newexcludes):
842 def setnarrowpats(self, newincludes, newexcludes):
838 target = self
843 target = self
839 if self.shared():
844 if self.shared():
840 from . import hg
845 from . import hg
841 target = hg.sharedreposource(self)
846 target = hg.sharedreposource(self)
842 narrowspec.save(target, newincludes, newexcludes)
847 narrowspec.save(target, newincludes, newexcludes)
843 self.invalidate(clearfilecache=True)
848 self.invalidate(clearfilecache=True)
844
849
    def __getitem__(self, changeid):
        """Return the context for ``changeid``.

        ``changeid`` may be None (working directory context), an existing
        context (returned as-is), a slice of revision numbers (returns a
        list of changectx, skipping filtered revs), or anything accepted
        by context.changectx (rev number, node, ...).
        """
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            # 'changeid' named the working directory pseudo-revision
            return context.workingctx(self)
859
864
860 def __contains__(self, changeid):
865 def __contains__(self, changeid):
861 """True if the given changeid exists
866 """True if the given changeid exists
862
867
863 error.AmbiguousPrefixLookupError is raised if an ambiguous node
868 error.AmbiguousPrefixLookupError is raised if an ambiguous node
864 specified.
869 specified.
865 """
870 """
866 try:
871 try:
867 self[changeid]
872 self[changeid]
868 return True
873 return True
869 except error.RepoLookupError:
874 except error.RepoLookupError:
870 return False
875 return False
871
876
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no revisions
        return True

    __bool__ = __nonzero__  # py3 spelling of __nonzero__
876
881
877 def __len__(self):
882 def __len__(self):
878 # no need to pay the cost of repoview.changelog
883 # no need to pay the cost of repoview.changelog
879 unfi = self.unfiltered()
884 unfi = self.unfiltered()
880 return len(unfi.changelog)
885 return len(unfi.changelog)
881
886
    def __iter__(self):
        # iterate over revision numbers, as provided by the changelog
        return iter(self.changelog)
884
889
885 def revs(self, expr, *args):
890 def revs(self, expr, *args):
886 '''Find revisions matching a revset.
891 '''Find revisions matching a revset.
887
892
888 The revset is specified as a string ``expr`` that may contain
893 The revset is specified as a string ``expr`` that may contain
889 %-formatting to escape certain types. See ``revsetlang.formatspec``.
894 %-formatting to escape certain types. See ``revsetlang.formatspec``.
890
895
891 Revset aliases from the configuration are not expanded. To expand
896 Revset aliases from the configuration are not expanded. To expand
892 user aliases, consider calling ``scmutil.revrange()`` or
897 user aliases, consider calling ``scmutil.revrange()`` or
893 ``repo.anyrevs([expr], user=True)``.
898 ``repo.anyrevs([expr], user=True)``.
894
899
895 Returns a revset.abstractsmartset, which is a list-like interface
900 Returns a revset.abstractsmartset, which is a list-like interface
896 that contains integer revisions.
901 that contains integer revisions.
897 '''
902 '''
898 expr = revsetlang.formatspec(expr, *args)
903 expr = revsetlang.formatspec(expr, *args)
899 m = revset.match(None, expr)
904 m = revset.match(None, expr)
900 return m(self)
905 return m(self)
901
906
902 def set(self, expr, *args):
907 def set(self, expr, *args):
903 '''Find revisions matching a revset and emit changectx instances.
908 '''Find revisions matching a revset and emit changectx instances.
904
909
905 This is a convenience wrapper around ``revs()`` that iterates the
910 This is a convenience wrapper around ``revs()`` that iterates the
906 result and is a generator of changectx instances.
911 result and is a generator of changectx instances.
907
912
908 Revset aliases from the configuration are not expanded. To expand
913 Revset aliases from the configuration are not expanded. To expand
909 user aliases, consider calling ``scmutil.revrange()``.
914 user aliases, consider calling ``scmutil.revrange()``.
910 '''
915 '''
911 for r in self.revs(expr, *args):
916 for r in self.revs(expr, *args):
912 yield self[r]
917 yield self[r]
913
918
914 def anyrevs(self, specs, user=False, localalias=None):
919 def anyrevs(self, specs, user=False, localalias=None):
915 '''Find revisions matching one of the given revsets.
920 '''Find revisions matching one of the given revsets.
916
921
917 Revset aliases from the configuration are not expanded by default. To
922 Revset aliases from the configuration are not expanded by default. To
918 expand user aliases, specify ``user=True``. To provide some local
923 expand user aliases, specify ``user=True``. To provide some local
919 definitions overriding user aliases, set ``localalias`` to
924 definitions overriding user aliases, set ``localalias`` to
920 ``{name: definitionstring}``.
925 ``{name: definitionstring}``.
921 '''
926 '''
922 if user:
927 if user:
923 m = revset.matchany(self.ui, specs,
928 m = revset.matchany(self.ui, specs,
924 lookup=revset.lookupfn(self),
929 lookup=revset.lookupfn(self),
925 localalias=localalias)
930 localalias=localalias)
926 else:
931 else:
927 m = revset.matchany(None, specs, localalias=localalias)
932 m = revset.matchany(None, specs, localalias=localalias)
928 return m(self)
933 return m(self)
929
934
    def url(self):
        # local repositories are addressed by a 'file:' URL of their root
        return 'file:' + self.root
932
937
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.

        ``name`` is the hook name; ``throw`` controls whether a failing
        hook raises instead of merely reporting; ``args`` are forwarded to
        the hook environment.
        """
        return hook.hook(self.ui, self, name, throw, **args)
941
946
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived structures, filled in lazily by their accessors
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
964
969
965 def tags(self):
970 def tags(self):
966 '''return a mapping of tag to node'''
971 '''return a mapping of tag to node'''
967 t = {}
972 t = {}
968 if self.changelog.filteredrevs:
973 if self.changelog.filteredrevs:
969 tags, tt = self._findtags()
974 tags, tt = self._findtags()
970 else:
975 else:
971 tags = self._tagscache.tags
976 tags = self._tagscache.tags
972 for k, v in tags.iteritems():
977 for k, v in tags.iteritems():
973 try:
978 try:
974 # ignore tags to unknown nodes
979 # ignore tags to unknown nodes
975 self.changelog.rev(v)
980 self.changelog.rev(v)
976 t[k] = v
981 t[k] = v
977 except (error.LookupError, ValueError):
982 except (error.LookupError, ValueError):
978 pass
983 pass
979 return t
984 return t
980
985
def _findtags(self):
    """Do the hard work of finding tags.

    Returns a pair of dicts ``(tags, tagtypes)``: ``tags`` maps tag
    name to node and ``tagtypes`` maps tag name to a string like
    'global' or 'local'.  Subclasses or extensions are free to add
    their own tags, but should be aware that the returned dicts will
    be retained for the duration of the localrepo object.
    """
    # XXX what tagtype should subclasses/extensions use?  Currently mq
    # and bookmarks add tags but never set a tagtype.  Should each
    # extension invent its own type, should one "virtual" type cover
    # them all, or is the status quo fine?

    # tag name -> (node, hist)
    alltags = tagsmod.findglobaltags(self.ui, self)
    # tag name -> tag type; globals first, then local overrides
    tagtypes = dict((tag, 'global') for tag in alltags)
    tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

    # The tags module stores names as UTF-8 (so no information is lost
    # when writing the cache); convert back to the local encoding for
    # the rest of Mercurial.
    tags = {}
    for name, (node, hist) in alltags.iteritems():
        if node != nullid:
            tags[encoding.tolocal(name)] = node
    tags['tip'] = self.changelog.tip()
    tagtypes = dict((encoding.tolocal(name), value)
                    for name, value in tagtypes.iteritems())
    return (tags, tagtypes)
1015
1020
def tagtype(self, tagname):
    """Return the type of the tag ``tagname``.

    Possible results:
      'local'  : a local tag
      'global' : a global tag
      None     : the tag does not exist
    """
    return self._tagscache.tagtypes.get(tagname)
1026
1031
def tagslist(self):
    """Return tags as a list of (tag, node) pairs ordered by revision.

    The result is computed lazily and memoized on the tags cache.
    """
    if not self._tagscache.tagslist:
        entries = sorted((self.changelog.rev(node), tag, node)
                         for tag, node in self.tags().iteritems())
        self._tagscache.tagslist = [(tag, node)
                                    for _rev, tag, node in entries]
    return self._tagscache.tagslist
1036
1041
def nodetags(self, node):
    """Return the sorted list of tags attached to ``node``."""
    cache = self._tagscache
    if not cache.nodetagscache:
        # Invert the tag -> node mapping into node -> sorted [tags].
        bynode = {}
        for tag, n in cache.tags.iteritems():
            bynode.setdefault(n, []).append(tag)
        for taglist in bynode.itervalues():
            taglist.sort()
        cache.nodetagscache = bynode
    return cache.nodetagscache.get(node, [])
1047
1052
def nodebookmarks(self, node):
    """Return the list of bookmarks pointing to ``node``."""
    return self._bookmarks.names(node)
1051
1056
def branchmap(self):
    """Return {branch: [branchheads]} with branchheads ordered by
    increasing revision number."""
    # refresh the cache for our current filter level before reading it
    branchmap.updatecache(self)
    return self._branchcaches[self.filtername]
1057
1062
@unfilteredmethod
def revbranchcache(self):
    """Return the rev -> branch name cache, creating it on first use."""
    cache = self._revbranchcache
    if not cache:
        cache = branchmap.revbranchcache(self.unfiltered())
        self._revbranchcache = cache
    return cache
1063
1068
def branchtip(self, branch, ignoremissing=False):
    """Return the tip node for the given branch.

    If ``ignoremissing`` is True, an unknown branch yields None instead
    of raising RepoLookupError.  This is helpful for callers that only
    expect None for a missing branch (e.g. namespace).
    """
    try:
        return self.branchmap().branchtip(branch)
    except KeyError:
        if ignoremissing:
            return None
        raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1079
1084
def lookup(self, key):
    """Resolve revision symbol ``key`` to a binary node."""
    return scmutil.revsymbol(self, key).node()
1082
1087
def lookupbranch(self, key):
    """Return ``key`` itself when it names a branch; otherwise the
    branch of the revision ``key`` resolves to."""
    if key in self.branchmap():
        return key
    return scmutil.revsymbol(self, key).branch()
1088
1093
def known(self, nodes):
    """Map each node in ``nodes`` to True when it is a known, visible
    (non-filtered) revision of this repository."""
    nodemap = self.changelog.nodemap
    filtered = self.changelog.filteredrevs
    result = []
    for node in nodes:
        rev = nodemap.get(node)
        # unknown nodes and filtered (hidden) revisions are "not known"
        result.append(rev is not None and rev not in filtered)
    return result
1099
1104
def local(self):
    """Return self: this repository is local (statichttprepo overrides
    this to return False)."""
    return self
1102
1107
def publishing(self):
    """True if this repository publishes changesets.

    It's safe (and desirable) to trust the publish flag
    unconditionally, so that we don't finalize changes shared between
    users via ssh or nfs.
    """
    return self.ui.configbool('phases', 'publish', untrusted=True)
1107
1112
def cancopy(self):
    """True when this repository can safely be copied wholesale."""
    # so statichttprepo's override of local() works
    if not self.local():
        return False
    # non-publishing repositories are always copyable
    if not self.publishing():
        return True
    # a publishing repo must not be copied while it hides content
    return not self.filtered('visible').changelog.filteredrevs
1116
1121
def shared(self):
    """Return the type of shared repository ('store'), or None when
    this repository is not shared."""
    if self.sharedpath == self.path:
        return None
    return 'store'
1122
1127
def wjoin(self, f, *insidef):
    """Join ``f`` (and any further segments) under the working
    directory root."""
    return self.vfs.reljoin(self.root, f, *insidef)
1125
1130
def file(self, f):
    """Return the filelog for tracked file ``f``.

    A single leading '/' is tolerated and stripped, matching the
    store path convention.  Using startswith() instead of ``f[0]``
    avoids an IndexError if ``f`` is ever the empty string.
    """
    if f.startswith('/'):
        f = f[1:]
    return filelog.filelog(self.svfs, f)
1130
1135
def setparents(self, p1, p2=nullid):
    """Set the working directory parents and fix up copy records.

    The dirstate cannot adjust copy records itself because that
    requires access to the parents' manifests, so it is done here.
    Copy information is preserved only for entries added relative to
    the first parent.
    """
    with self.dirstate.parentchange():
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # keep copy records only for files missing from the first
            # parent whose copy source does exist there
            for dst in copies:
                if dst not in pctx and copies[dst] in pctx:
                    self.dirstate.copy(copies[dst], dst)
        if p2 == nullid:
            # dropping the second parent: discard copy records whose
            # source and destination are both absent from p1
            for dst, src in sorted(self.dirstate.copies().items()):
                if dst not in pctx and src not in pctx:
                    self.dirstate.copy(None, dst)
1146
1151
def filectx(self, path, changeid=None, fileid=None, changectx=None):
    """Return a filectx for ``path``.

    ``changeid`` can be a changeset revision, node, or tag; ``fileid``
    can be a file revision or node.
    """
    return context.filectx(self, path, changeid, fileid,
                           changectx=changectx)
1152
1157
def getcwd(self):
    """Return the current working directory as the dirstate sees it."""
    return self.dirstate.getcwd()
1155
1160
def pathto(self, f, cwd=None):
    """Return ``f`` as a path relative to ``cwd`` (dirstate semantics)."""
    return self.dirstate.pathto(f, cwd)
1158
1163
def _loadfilter(self, filter):
    """Compile and cache the filter patterns configured in section
    ``filter`` (e.g. 'encode' or 'decode').

    Returns a list of (matcher, filterfn, params) triples.
    """
    if filter not in self._filterpats:
        patterns = []
        for pat, cmd in self.ui.configitems(filter):
            if cmd == '!':
                continue
            mf = matchmod.match(self.root, '', [pat])
            fn = None
            params = cmd
            # a command starting with a registered data-filter name
            # dispatches to that in-process filter instead of a shell
            # pipe
            for name, filterfn in self._datafilters.iteritems():
                if cmd.startswith(name):
                    fn = filterfn
                    params = cmd[len(name):].lstrip()
                    break
            if not fn:
                fn = lambda s, c, **kwargs: procutil.filter(s, c)
            # wrap old filters not supporting keyword arguments
            if not pycompat.getargspec(fn)[2]:
                oldfn = fn
                fn = lambda s, c, **kwargs: oldfn(s, c)
            patterns.append((mf, fn, params))
        self._filterpats[filter] = patterns
    return self._filterpats[filter]
1182
1187
1183 def _filter(self, filterpats, filename, data):
1188 def _filter(self, filterpats, filename, data):
1184 for mf, fn, cmd in filterpats:
1189 for mf, fn, cmd in filterpats:
1185 if mf(filename):
1190 if mf(filename):
1186 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1191 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1187 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1192 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1188 break
1193 break
1189
1194
1190 return data
1195 return data
1191
1196
@unfilteredpropertycache
def _encodefilterpats(self):
    """Cached, compiled [encode] filter patterns (see _loadfilter)."""
    return self._loadfilter('encode')
1195
1200
@unfilteredpropertycache
def _decodefilterpats(self):
    """Cached, compiled [decode] filter patterns (see _loadfilter)."""
    return self._loadfilter('decode')
1199
1204
def adddatafilter(self, name, filter):
    """Register the in-process data filter ``filter`` under ``name``
    (see _loadfilter for how it is dispatched)."""
    self._datafilters[name] = filter
1202
1207
def wread(self, filename):
    """Read ``filename`` from the working directory and apply the
    encode filters.  A symlink yields its target path as data."""
    if self.wvfs.islink(filename):
        data = self.wvfs.readlink(filename)
    else:
        data = self.wvfs.read(filename)
    return self._filter(self._encodefilterpats, filename, data)
1209
1214
def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
    """Write ``data`` into ``filename`` in the working directory.

    Applies the decode filters and honours the 'l' (symlink) and 'x'
    (executable) flags.  Returns the length of the written (maybe
    decoded) data.
    """
    data = self._filter(self._decodefilterpats, filename, data)
    if 'l' in flags:
        self.wvfs.symlink(data, filename)
    else:
        self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                        **kwargs)
        # 'x' toggles the executable bit; symlinks never carry flags
        self.wvfs.setflags(filename, False, 'x' in flags)
    return len(data)
1226
1231
def wwritedata(self, filename, data):
    """Return ``data`` run through the decode filters for ``filename``
    without touching the working directory."""
    return self._filter(self._decodefilterpats, filename, data)
1229
1234
def currenttransaction(self):
    """Return the current transaction, or None if none is running."""
    # _transref is a weak reference (or None); dereference carefully
    tr = self._transref() if self._transref else None
    if tr and tr.running():
        return tr
    return None
1240
1245
1241 def transaction(self, desc, report=None):
1246 def transaction(self, desc, report=None):
1242 if (self.ui.configbool('devel', 'all-warnings')
1247 if (self.ui.configbool('devel', 'all-warnings')
1243 or self.ui.configbool('devel', 'check-locks')):
1248 or self.ui.configbool('devel', 'check-locks')):
1244 if self._currentlock(self._lockref) is None:
1249 if self._currentlock(self._lockref) is None:
1245 raise error.ProgrammingError('transaction requires locking')
1250 raise error.ProgrammingError('transaction requires locking')
1246 tr = self.currenttransaction()
1251 tr = self.currenttransaction()
1247 if tr is not None:
1252 if tr is not None:
1248 return tr.nest(name=desc)
1253 return tr.nest(name=desc)
1249
1254
1250 # abort here if the journal already exists
1255 # abort here if the journal already exists
1251 if self.svfs.exists("journal"):
1256 if self.svfs.exists("journal"):
1252 raise error.RepoError(
1257 raise error.RepoError(
1253 _("abandoned transaction found"),
1258 _("abandoned transaction found"),
1254 hint=_("run 'hg recover' to clean up transaction"))
1259 hint=_("run 'hg recover' to clean up transaction"))
1255
1260
1256 idbase = "%.40f#%f" % (random.random(), time.time())
1261 idbase = "%.40f#%f" % (random.random(), time.time())
1257 ha = hex(hashlib.sha1(idbase).digest())
1262 ha = hex(hashlib.sha1(idbase).digest())
1258 txnid = 'TXN:' + ha
1263 txnid = 'TXN:' + ha
1259 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1264 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1260
1265
1261 self._writejournal(desc)
1266 self._writejournal(desc)
1262 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1267 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1263 if report:
1268 if report:
1264 rp = report
1269 rp = report
1265 else:
1270 else:
1266 rp = self.ui.warn
1271 rp = self.ui.warn
1267 vfsmap = {'plain': self.vfs} # root of .hg/
1272 vfsmap = {'plain': self.vfs} # root of .hg/
1268 # we must avoid cyclic reference between repo and transaction.
1273 # we must avoid cyclic reference between repo and transaction.
1269 reporef = weakref.ref(self)
1274 reporef = weakref.ref(self)
1270 # Code to track tag movement
1275 # Code to track tag movement
1271 #
1276 #
1272 # Since tags are all handled as file content, it is actually quite hard
1277 # Since tags are all handled as file content, it is actually quite hard
1273 # to track these movement from a code perspective. So we fallback to a
1278 # to track these movement from a code perspective. So we fallback to a
1274 # tracking at the repository level. One could envision to track changes
1279 # tracking at the repository level. One could envision to track changes
1275 # to the '.hgtags' file through changegroup apply but that fails to
1280 # to the '.hgtags' file through changegroup apply but that fails to
1276 # cope with case where transaction expose new heads without changegroup
1281 # cope with case where transaction expose new heads without changegroup
1277 # being involved (eg: phase movement).
1282 # being involved (eg: phase movement).
1278 #
1283 #
1279 # For now, We gate the feature behind a flag since this likely comes
1284 # For now, We gate the feature behind a flag since this likely comes
1280 # with performance impacts. The current code run more often than needed
1285 # with performance impacts. The current code run more often than needed
1281 # and do not use caches as much as it could. The current focus is on
1286 # and do not use caches as much as it could. The current focus is on
1282 # the behavior of the feature so we disable it by default. The flag
1287 # the behavior of the feature so we disable it by default. The flag
1283 # will be removed when we are happy with the performance impact.
1288 # will be removed when we are happy with the performance impact.
1284 #
1289 #
1285 # Once this feature is no longer experimental move the following
1290 # Once this feature is no longer experimental move the following
1286 # documentation to the appropriate help section:
1291 # documentation to the appropriate help section:
1287 #
1292 #
1288 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1293 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1289 # tags (new or changed or deleted tags). In addition the details of
1294 # tags (new or changed or deleted tags). In addition the details of
1290 # these changes are made available in a file at:
1295 # these changes are made available in a file at:
1291 # ``REPOROOT/.hg/changes/tags.changes``.
1296 # ``REPOROOT/.hg/changes/tags.changes``.
1292 # Make sure you check for HG_TAG_MOVED before reading that file as it
1297 # Make sure you check for HG_TAG_MOVED before reading that file as it
1293 # might exist from a previous transaction even if no tag were touched
1298 # might exist from a previous transaction even if no tag were touched
1294 # in this one. Changes are recorded in a line base format::
1299 # in this one. Changes are recorded in a line base format::
1295 #
1300 #
1296 # <action> <hex-node> <tag-name>\n
1301 # <action> <hex-node> <tag-name>\n
1297 #
1302 #
1298 # Actions are defined as follow:
1303 # Actions are defined as follow:
1299 # "-R": tag is removed,
1304 # "-R": tag is removed,
1300 # "+A": tag is added,
1305 # "+A": tag is added,
1301 # "-M": tag is moved (old value),
1306 # "-M": tag is moved (old value),
1302 # "+M": tag is moved (new value),
1307 # "+M": tag is moved (new value),
1303 tracktags = lambda x: None
1308 tracktags = lambda x: None
1304 # experimental config: experimental.hook-track-tags
1309 # experimental config: experimental.hook-track-tags
1305 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1310 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1306 if desc != 'strip' and shouldtracktags:
1311 if desc != 'strip' and shouldtracktags:
1307 oldheads = self.changelog.headrevs()
1312 oldheads = self.changelog.headrevs()
1308 def tracktags(tr2):
1313 def tracktags(tr2):
1309 repo = reporef()
1314 repo = reporef()
1310 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1315 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1311 newheads = repo.changelog.headrevs()
1316 newheads = repo.changelog.headrevs()
1312 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1317 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1313 # notes: we compare lists here.
1318 # notes: we compare lists here.
1314 # As we do it only once buiding set would not be cheaper
1319 # As we do it only once buiding set would not be cheaper
1315 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1320 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1316 if changes:
1321 if changes:
1317 tr2.hookargs['tag_moved'] = '1'
1322 tr2.hookargs['tag_moved'] = '1'
1318 with repo.vfs('changes/tags.changes', 'w',
1323 with repo.vfs('changes/tags.changes', 'w',
1319 atomictemp=True) as changesfile:
1324 atomictemp=True) as changesfile:
1320 # note: we do not register the file to the transaction
1325 # note: we do not register the file to the transaction
1321 # because we needs it to still exist on the transaction
1326 # because we needs it to still exist on the transaction
1322 # is close (for txnclose hooks)
1327 # is close (for txnclose hooks)
1323 tagsmod.writediff(changesfile, changes)
1328 tagsmod.writediff(changesfile, changes)
1324 def validate(tr2):
1329 def validate(tr2):
1325 """will run pre-closing hooks"""
1330 """will run pre-closing hooks"""
1326 # XXX the transaction API is a bit lacking here so we take a hacky
1331 # XXX the transaction API is a bit lacking here so we take a hacky
1327 # path for now
1332 # path for now
1328 #
1333 #
1329 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1334 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1330 # dict is copied before these run. In addition we needs the data
1335 # dict is copied before these run. In addition we needs the data
1331 # available to in memory hooks too.
1336 # available to in memory hooks too.
1332 #
1337 #
1333 # Moreover, we also need to make sure this runs before txnclose
1338 # Moreover, we also need to make sure this runs before txnclose
1334 # hooks and there is no "pending" mechanism that would execute
1339 # hooks and there is no "pending" mechanism that would execute
1335 # logic only if hooks are about to run.
1340 # logic only if hooks are about to run.
1336 #
1341 #
1337 # Fixing this limitation of the transaction is also needed to track
1342 # Fixing this limitation of the transaction is also needed to track
1338 # other families of changes (bookmarks, phases, obsolescence).
1343 # other families of changes (bookmarks, phases, obsolescence).
1339 #
1344 #
1340 # This will have to be fixed before we remove the experimental
1345 # This will have to be fixed before we remove the experimental
1341 # gating.
1346 # gating.
1342 tracktags(tr2)
1347 tracktags(tr2)
1343 repo = reporef()
1348 repo = reporef()
1344 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1349 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1345 scmutil.enforcesinglehead(repo, tr2, desc)
1350 scmutil.enforcesinglehead(repo, tr2, desc)
1346 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1351 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1347 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1352 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1348 args = tr.hookargs.copy()
1353 args = tr.hookargs.copy()
1349 args.update(bookmarks.preparehookargs(name, old, new))
1354 args.update(bookmarks.preparehookargs(name, old, new))
1350 repo.hook('pretxnclose-bookmark', throw=True,
1355 repo.hook('pretxnclose-bookmark', throw=True,
1351 txnname=desc,
1356 txnname=desc,
1352 **pycompat.strkwargs(args))
1357 **pycompat.strkwargs(args))
1353 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1358 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1354 cl = repo.unfiltered().changelog
1359 cl = repo.unfiltered().changelog
1355 for rev, (old, new) in tr.changes['phases'].items():
1360 for rev, (old, new) in tr.changes['phases'].items():
1356 args = tr.hookargs.copy()
1361 args = tr.hookargs.copy()
1357 node = hex(cl.node(rev))
1362 node = hex(cl.node(rev))
1358 args.update(phases.preparehookargs(node, old, new))
1363 args.update(phases.preparehookargs(node, old, new))
1359 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1364 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1360 **pycompat.strkwargs(args))
1365 **pycompat.strkwargs(args))
1361
1366
1362 repo.hook('pretxnclose', throw=True,
1367 repo.hook('pretxnclose', throw=True,
1363 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1368 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1364 def releasefn(tr, success):
1369 def releasefn(tr, success):
1365 repo = reporef()
1370 repo = reporef()
1366 if success:
1371 if success:
1367 # this should be explicitly invoked here, because
1372 # this should be explicitly invoked here, because
1368 # in-memory changes aren't written out at closing
1373 # in-memory changes aren't written out at closing
1369 # transaction, if tr.addfilegenerator (via
1374 # transaction, if tr.addfilegenerator (via
1370 # dirstate.write or so) isn't invoked while
1375 # dirstate.write or so) isn't invoked while
1371 # transaction running
1376 # transaction running
1372 repo.dirstate.write(None)
1377 repo.dirstate.write(None)
1373 else:
1378 else:
1374 # discard all changes (including ones already written
1379 # discard all changes (including ones already written
1375 # out) in this transaction
1380 # out) in this transaction
1376 narrowspec.restorebackup(self, 'journal.narrowspec')
1381 narrowspec.restorebackup(self, 'journal.narrowspec')
1377 repo.dirstate.restorebackup(None, 'journal.dirstate')
1382 repo.dirstate.restorebackup(None, 'journal.dirstate')
1378
1383
1379 repo.invalidate(clearfilecache=True)
1384 repo.invalidate(clearfilecache=True)
1380
1385
1381 tr = transaction.transaction(rp, self.svfs, vfsmap,
1386 tr = transaction.transaction(rp, self.svfs, vfsmap,
1382 "journal",
1387 "journal",
1383 "undo",
1388 "undo",
1384 aftertrans(renames),
1389 aftertrans(renames),
1385 self.store.createmode,
1390 self.store.createmode,
1386 validator=validate,
1391 validator=validate,
1387 releasefn=releasefn,
1392 releasefn=releasefn,
1388 checkambigfiles=_cachedfiles,
1393 checkambigfiles=_cachedfiles,
1389 name=desc)
1394 name=desc)
1390 tr.changes['revs'] = pycompat.xrange(0, 0)
1395 tr.changes['revs'] = pycompat.xrange(0, 0)
1391 tr.changes['obsmarkers'] = set()
1396 tr.changes['obsmarkers'] = set()
1392 tr.changes['phases'] = {}
1397 tr.changes['phases'] = {}
1393 tr.changes['bookmarks'] = {}
1398 tr.changes['bookmarks'] = {}
1394
1399
1395 tr.hookargs['txnid'] = txnid
1400 tr.hookargs['txnid'] = txnid
1396 # note: writing the fncache only during finalize mean that the file is
1401 # note: writing the fncache only during finalize mean that the file is
1397 # outdated when running hooks. As fncache is used for streaming clone,
1402 # outdated when running hooks. As fncache is used for streaming clone,
1398 # this is not expected to break anything that happen during the hooks.
1403 # this is not expected to break anything that happen during the hooks.
1399 tr.addfinalize('flush-fncache', self.store.write)
1404 tr.addfinalize('flush-fncache', self.store.write)
1400 def txnclosehook(tr2):
1405 def txnclosehook(tr2):
1401 """To be run if transaction is successful, will schedule a hook run
1406 """To be run if transaction is successful, will schedule a hook run
1402 """
1407 """
1403 # Don't reference tr2 in hook() so we don't hold a reference.
1408 # Don't reference tr2 in hook() so we don't hold a reference.
1404 # This reduces memory consumption when there are multiple
1409 # This reduces memory consumption when there are multiple
1405 # transactions per lock. This can likely go away if issue5045
1410 # transactions per lock. This can likely go away if issue5045
1406 # fixes the function accumulation.
1411 # fixes the function accumulation.
1407 hookargs = tr2.hookargs
1412 hookargs = tr2.hookargs
1408
1413
1409 def hookfunc():
1414 def hookfunc():
1410 repo = reporef()
1415 repo = reporef()
1411 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1416 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1412 bmchanges = sorted(tr.changes['bookmarks'].items())
1417 bmchanges = sorted(tr.changes['bookmarks'].items())
1413 for name, (old, new) in bmchanges:
1418 for name, (old, new) in bmchanges:
1414 args = tr.hookargs.copy()
1419 args = tr.hookargs.copy()
1415 args.update(bookmarks.preparehookargs(name, old, new))
1420 args.update(bookmarks.preparehookargs(name, old, new))
1416 repo.hook('txnclose-bookmark', throw=False,
1421 repo.hook('txnclose-bookmark', throw=False,
1417 txnname=desc, **pycompat.strkwargs(args))
1422 txnname=desc, **pycompat.strkwargs(args))
1418
1423
1419 if hook.hashook(repo.ui, 'txnclose-phase'):
1424 if hook.hashook(repo.ui, 'txnclose-phase'):
1420 cl = repo.unfiltered().changelog
1425 cl = repo.unfiltered().changelog
1421 phasemv = sorted(tr.changes['phases'].items())
1426 phasemv = sorted(tr.changes['phases'].items())
1422 for rev, (old, new) in phasemv:
1427 for rev, (old, new) in phasemv:
1423 args = tr.hookargs.copy()
1428 args = tr.hookargs.copy()
1424 node = hex(cl.node(rev))
1429 node = hex(cl.node(rev))
1425 args.update(phases.preparehookargs(node, old, new))
1430 args.update(phases.preparehookargs(node, old, new))
1426 repo.hook('txnclose-phase', throw=False, txnname=desc,
1431 repo.hook('txnclose-phase', throw=False, txnname=desc,
1427 **pycompat.strkwargs(args))
1432 **pycompat.strkwargs(args))
1428
1433
1429 repo.hook('txnclose', throw=False, txnname=desc,
1434 repo.hook('txnclose', throw=False, txnname=desc,
1430 **pycompat.strkwargs(hookargs))
1435 **pycompat.strkwargs(hookargs))
1431 reporef()._afterlock(hookfunc)
1436 reporef()._afterlock(hookfunc)
1432 tr.addfinalize('txnclose-hook', txnclosehook)
1437 tr.addfinalize('txnclose-hook', txnclosehook)
1433 # Include a leading "-" to make it happen before the transaction summary
1438 # Include a leading "-" to make it happen before the transaction summary
1434 # reports registered via scmutil.registersummarycallback() whose names
1439 # reports registered via scmutil.registersummarycallback() whose names
1435 # are 00-txnreport etc. That way, the caches will be warm when the
1440 # are 00-txnreport etc. That way, the caches will be warm when the
1436 # callbacks run.
1441 # callbacks run.
1437 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1442 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1438 def txnaborthook(tr2):
1443 def txnaborthook(tr2):
1439 """To be run if transaction is aborted
1444 """To be run if transaction is aborted
1440 """
1445 """
1441 reporef().hook('txnabort', throw=False, txnname=desc,
1446 reporef().hook('txnabort', throw=False, txnname=desc,
1442 **pycompat.strkwargs(tr2.hookargs))
1447 **pycompat.strkwargs(tr2.hookargs))
1443 tr.addabort('txnabort-hook', txnaborthook)
1448 tr.addabort('txnabort-hook', txnaborthook)
1444 # avoid eager cache invalidation. in-memory data should be identical
1449 # avoid eager cache invalidation. in-memory data should be identical
1445 # to stored data if transaction has no error.
1450 # to stored data if transaction has no error.
1446 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1451 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1447 self._transref = weakref.ref(tr)
1452 self._transref = weakref.ref(tr)
1448 scmutil.registersummarycallback(self, tr, desc)
1453 scmutil.registersummarycallback(self, tr, desc)
1449 return tr
1454 return tr
1450
1455
1451 def _journalfiles(self):
1456 def _journalfiles(self):
1452 return ((self.svfs, 'journal'),
1457 return ((self.svfs, 'journal'),
1453 (self.vfs, 'journal.dirstate'),
1458 (self.vfs, 'journal.dirstate'),
1454 (self.vfs, 'journal.branch'),
1459 (self.vfs, 'journal.branch'),
1455 (self.vfs, 'journal.desc'),
1460 (self.vfs, 'journal.desc'),
1456 (self.vfs, 'journal.bookmarks'),
1461 (self.vfs, 'journal.bookmarks'),
1457 (self.svfs, 'journal.phaseroots'))
1462 (self.svfs, 'journal.phaseroots'))
1458
1463
def undofiles(self):
    """Return (vfs, name) pairs for the undo files of the last transaction."""
    return [(vfs, undoname(name)) for vfs, name in self._journalfiles()]
1461
1466
@unfilteredmethod
def _writejournal(self, desc):
    """Snapshot working-copy and store metadata before a transaction.

    Saves dirstate, narrowspec, current branch, a description header
    (old changelog length + *desc*), bookmarks and phase roots so an
    interrupted transaction can later be rolled back.
    """
    self.dirstate.savebackup(None, 'journal.dirstate')
    narrowspec.savebackup(self, 'journal.narrowspec')
    branch = encoding.fromlocal(self.dirstate.branch())
    self.vfs.write("journal.branch", branch)
    self.vfs.write("journal.desc", "%d\n%s\n" % (len(self), desc))
    self.vfs.write("journal.bookmarks", self.vfs.tryread("bookmarks"))
    self.svfs.write("journal.phaseroots", self.svfs.tryread("phaseroots"))
1474
1479
def recover(self):
    """Roll back an interrupted transaction, if one is present.

    Returns True when a journal was found and rolled back, False
    (with a warning) otherwise.
    """
    with self.lock():
        if not self.svfs.exists("journal"):
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
        self.ui.status(_("rolling back interrupted transaction\n"))
        # map journal path prefixes to the vfs they are relative to
        vfsmap = {'': self.svfs,
                  'plain': self.vfs,}
        transaction.rollback(self.svfs, vfsmap, "journal",
                             self.ui.warn,
                             checkambigfiles=_cachedfiles)
        self.invalidate()
        return True
1489
1494
def rollback(self, dryrun=False, force=False):
    """Undo the last transaction recorded in the 'undo' files.

    Returns 0 on success, 1 when there is nothing to roll back.
    Acquires wlock before lock to respect the locking order.
    """
    wlock = lock = dsguard = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        if not self.svfs.exists("undo"):
            self.ui.warn(_("no rollback information available\n"))
            return 1
        # guard the dirstate so a failed rollback does not corrupt it
        dsguard = dirstateguard.dirstateguard(self, 'rollback')
        return self._rollback(dryrun, force, dsguard)
    finally:
        release(dsguard, lock, wlock)
1504
1509
@unfilteredmethod  # Until we get smarter cache management
def _rollback(self, dryrun, force, dsguard):
    """Implementation of rollback(); callers hold wlock and lock.

    Reads 'undo.desc' to describe what is being undone, refuses (without
    *force*) to drop a commit that is not the working-copy parent, then
    restores store files, bookmarks, phases and - when the working-copy
    parent itself vanished - the dirstate, branch and narrowspec backups.
    """
    ui = self.ui
    try:
        lines = self.vfs.read('undo.desc').splitlines()
        oldlen, desc, detail = int(lines[0]), lines[1], None
        if len(lines) >= 3:
            detail = lines[2]
        oldtip = oldlen - 1

        if detail and ui.verbose:
            msg = (_('repository tip rolled back to revision %d'
                     ' (undo %s: %s)\n')
                   % (oldtip, desc, detail))
        else:
            msg = (_('repository tip rolled back to revision %d'
                     ' (undo %s)\n')
                   % (oldtip, desc))
    except IOError:
        # no (readable) undo.desc: roll back anyway, but say so
        msg = _('rolling back unknown transaction\n')
        desc = None

    # undoing a commit from under a different checkout would lose data
    if not force and self['.'] != self['tip'] and desc == 'commit':
        raise error.Abort(
            _('rollback of last commit while not checked out '
              'may lose data'), hint=_('use -f to force'))

    ui.status(msg)
    if dryrun:
        return 0

    parents = self.dirstate.parents()
    self.destroying()
    vfsmap = {'plain': self.vfs, '': self.svfs}
    transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                         checkambigfiles=_cachedfiles)
    if self.vfs.exists('undo.bookmarks'):
        self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
    if self.svfs.exists('undo.phaseroots'):
        self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
    self.invalidate()

    parentgone = (parents[0] not in self.changelog.nodemap or
                  parents[1] not in self.changelog.nodemap)
    if parentgone:
        # prevent dirstateguard from overwriting already restored one
        dsguard.close()

        narrowspec.restorebackup(self, 'undo.narrowspec')
        self.dirstate.restorebackup(None, 'undo.dirstate')
        try:
            branch = self.vfs.read('undo.branch')
            self.dirstate.setbranch(encoding.tolocal(branch))
        except IOError:
            ui.warn(_('named branch could not be reset: '
                      'current branch is still \'%s\'\n')
                    % self.dirstate.branch())

        parents = tuple(p.rev() for p in self[None].parents())
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        mergemod.mergestate.clean(self, self['.'].node())

    # TODO: if we know which new heads may result from this rollback, pass
    # them to destroy(), which will prevent the branchhead cache from being
    # invalidated.
    self.destroyed()
    return 0
1577
1582
1578 def _buildcacheupdater(self, newtransaction):
1583 def _buildcacheupdater(self, newtransaction):
1579 """called during transaction to build the callback updating cache
1584 """called during transaction to build the callback updating cache
1580
1585
1581 Lives on the repository to help extension who might want to augment
1586 Lives on the repository to help extension who might want to augment
1582 this logic. For this purpose, the created transaction is passed to the
1587 this logic. For this purpose, the created transaction is passed to the
1583 method.
1588 method.
1584 """
1589 """
1585 # we must avoid cyclic reference between repo and transaction.
1590 # we must avoid cyclic reference between repo and transaction.
1586 reporef = weakref.ref(self)
1591 reporef = weakref.ref(self)
1587 def updater(tr):
1592 def updater(tr):
1588 repo = reporef()
1593 repo = reporef()
1589 repo.updatecaches(tr)
1594 repo.updatecaches(tr)
1590 return updater
1595 return updater
1591
1596
@unfilteredmethod
def updatecaches(self, tr=None, full=False):
    """warm appropriate caches

    If this function is called after a transaction closed. The transaction
    will be available in the 'tr' argument. This can be used to selectively
    update caches relevant to the changes in that transaction.

    If 'full' is set, make sure all caches the function knows about have
    up-to-date data. Even the ones usually loaded more lazily.
    """
    if tr is not None and tr.hookargs.get('source') == 'strip':
        # During strip, many caches are invalid but
        # later call to `destroyed` will refresh them.
        return

    if tr is None or tr.changes['revs']:
        # updating the unfiltered branchmap should refresh all the others,
        self.ui.debug('updating the branch cache\n')
        branchmap.updatecache(self.filtered('served'))

    if full:
        cache = self.revbranchcache()
        for rev in self.changelog:
            cache.branchinfo(rev)
        cache.write()

        # ensure the working copy parents are in the manifestfulltextcache
        for ctx in self['.'].parents():
            ctx.manifest()  # accessing the manifest is enough
1622
1627
def invalidatecaches(self):
    """Drop in-memory derived caches: tags, branch heads, volatile sets
    and sparse signatures."""
    # can't use delattr on proxy, so drop straight from the instance dict
    self.__dict__.pop('_tagscache', None)

    self.unfiltered()._branchcaches.clear()
    self.invalidatevolatilesets()
    self._sparsesignaturecache.clear()
1632
1637
def invalidatevolatilesets(self):
    """Forget the filtered-revision cache and obsolescence caches."""
    self.filteredrevcache.clear()
    obsolete.clearobscaches(self)
1636
1641
def invalidatedirstate(self):
    '''Invalidates the dirstate, causing the next call to dirstate
    to check if it was modified since the last time it was read,
    rereading it if it has.

    This is different to dirstate.invalidate() that it doesn't always
    rereads the dirstate. Use dirstate.invalidate() if you want to
    explicitly read the dirstate again (i.e. restoring it to a previous
    known good state).'''
    if not hasunfilteredcache(self, 'dirstate'):
        # dirstate was never loaded: nothing cached to throw away
        return
    for attr in self.dirstate._filecache:
        try:
            delattr(self.dirstate, attr)
        except AttributeError:
            pass
    delattr(self.unfiltered(), 'dirstate')
1653
1658
def invalidate(self, clearfilecache=False):
    '''Invalidates both store and non-store parts other than dirstate

    If a transaction is running, invalidation of store is omitted,
    because discarding in-memory changes might cause inconsistency
    (e.g. incomplete fncache causes unintentional failure, but
    redundant one doesn't).
    '''
    unfiltered = self.unfiltered()  # all file caches are stored unfiltered
    for name in list(self._filecache.keys()):
        if name == 'dirstate':
            # dirstate is invalidated separately in invalidatedirstate()
            continue
        if (name == 'changelog' and
            self.currenttransaction() and
            self.changelog._delayed):
            # The changelog object may store unwritten revisions. We don't
            # want to lose them.
            # TODO: Solve the problem instead of working around it.
            continue

        if clearfilecache:
            del self._filecache[name]
        try:
            delattr(unfiltered, name)
        except AttributeError:
            pass
    self.invalidatecaches()
    if not self.currenttransaction():
        # TODO: Changing contents of store outside transaction
        # causes inconsistency. We should make in-memory store
        # changes detectable, and abort if changed.
        self.store.invalidatecaches()
1687
1692
def invalidateall(self):
    '''Fully invalidates both store and non-store parts, causing the
    subsequent operation to reread any outside changes.'''
    # extension should hook this to invalidate its caches
    self.invalidate()
    self.invalidatedirstate()
1694
1699
@unfilteredmethod
def _refreshfilecachestats(self, tr):
    """Reload stats of cached files so that they are flagged as valid"""
    for name, entry in self._filecache.items():
        name = pycompat.sysstr(name)
        # dirstate is refreshed by its own machinery; skip entries that
        # were never materialized on the instance
        if name == r'dirstate' or name not in self.__dict__:
            continue
        entry.refresh()
1703
1708
def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
          inheritchecker=None, parentenvvar=None):
    """Acquire *lockname* on *vfs* and return the lock object.

    Honors the ui timeout configuration when *wait* is true and supports
    lock inheritance from a parent process via *parentenvvar*.
    """
    parentlock = None
    # the contents of parentenvvar are used by the underlying lock to
    # determine whether it can be inherited
    if parentenvvar is not None:
        parentlock = encoding.environ.get(parentenvvar)

    timeout = 0
    warntimeout = 0
    if wait:
        timeout = self.ui.configint("ui", "timeout")
        warntimeout = self.ui.configint("ui", "timeout.warn")
    # internal config: ui.signal-safe-lock
    signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

    return lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                           releasefn=releasefn,
                           acquirefn=acquirefn, desc=desc,
                           inheritchecker=inheritchecker,
                           parentlock=parentlock,
                           signalsafe=signalsafe)
1727
1732
1728 def _afterlock(self, callback):
1733 def _afterlock(self, callback):
1729 """add a callback to be run when the repository is fully unlocked
1734 """add a callback to be run when the repository is fully unlocked
1730
1735
1731 The callback will be executed when the outermost lock is released
1736 The callback will be executed when the outermost lock is released
1732 (with wlock being higher level than 'lock')."""
1737 (with wlock being higher level than 'lock')."""
1733 for ref in (self._wlockref, self._lockref):
1738 for ref in (self._wlockref, self._lockref):
1734 l = ref and ref()
1739 l = ref and ref()
1735 if l and l.held:
1740 if l and l.held:
1736 l.postrelease.append(callback)
1741 l.postrelease.append(callback)
1737 break
1742 break
1738 else: # no lock have been found.
1743 else: # no lock have been found.
1739 callback()
1744 callback()
1740
1745
def lock(self, wait=True):
    '''Lock the repository store (.hg/store) and return a weak reference
    to the lock. Use this before modifying the store (e.g. committing or
    stripping). If you are opening a transaction, get a lock as well.)

    If both 'lock' and 'wlock' must be acquired, ensure you always acquires
    'wlock' first to avoid a dead-lock hazard.'''
    existing = self._currentlock(self._lockref)
    if existing is not None:
        # re-enter the already-held lock
        existing.lock()
        return existing

    newlock = self._lock(self.svfs, "lock", wait, None,
                         self.invalidate, _('repository %s') % self.origroot)
    self._lockref = weakref.ref(newlock)
    return newlock
1757
1762
1758 def _wlockchecktransaction(self):
1763 def _wlockchecktransaction(self):
1759 if self.currenttransaction() is not None:
1764 if self.currenttransaction() is not None:
1760 raise error.LockInheritanceContractViolation(
1765 raise error.LockInheritanceContractViolation(
1761 'wlock cannot be inherited in the middle of a transaction')
1766 'wlock cannot be inherited in the middle of a transaction')
1762
1767
def wlock(self, wait=True):
    '''Lock the non-store parts of the repository (everything under
    .hg except .hg/store) and return a weak reference to the lock.

    Use this before modifying files in .hg.

    If both 'lock' and 'wlock' must be acquired, ensure you always acquires
    'wlock' first to avoid a dead-lock hazard.'''
    existing = self._wlockref and self._wlockref()
    if existing is not None and existing.held:
        # re-enter the already-held wlock
        existing.lock()
        return existing

    # We do not need to check for non-waiting lock acquisition. Such
    # acquisition would not cause dead-lock as they would just fail.
    if wait and (self.ui.configbool('devel', 'all-warnings')
                 or self.ui.configbool('devel', 'check-locks')):
        if self._currentlock(self._lockref) is not None:
            self.ui.develwarn('"wlock" acquired after "lock"')

    def unlock():
        # flush (or discard, mid parent-change) the dirstate on release
        if self.dirstate.pendingparentchange():
            self.dirstate.invalidate()
        else:
            self.dirstate.write(None)

        self._filecache['dirstate'].refresh()

    newlock = self._lock(self.vfs, "wlock", wait, unlock,
                         self.invalidatedirstate, _('working directory of %s') %
                         self.origroot,
                         inheritchecker=self._wlockchecktransaction,
                         parentenvvar='HG_WLOCK_LOCKER')
    self._wlockref = weakref.ref(newlock)
    return newlock
1798
1803
1799 def _currentlock(self, lockref):
1804 def _currentlock(self, lockref):
1800 """Returns the lock if it's held, or None if it's not."""
1805 """Returns the lock if it's held, or None if it's not."""
1801 if lockref is None:
1806 if lockref is None:
1802 return None
1807 return None
1803 l = lockref()
1808 l = lockref()
1804 if l is None or not l.held:
1809 if l is None or not l.held:
1805 return None
1810 return None
1806 return l
1811 return l
1807
1812
def currentwlock(self):
    """Return the wlock if it is currently held, or None otherwise."""
    return self._currentlock(self._wlockref)
1811
1816
1812 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1817 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1813 """
1818 """
1814 commit an individual file as part of a larger transaction
1819 commit an individual file as part of a larger transaction
1815 """
1820 """
1816
1821
1817 fname = fctx.path()
1822 fname = fctx.path()
1818 fparent1 = manifest1.get(fname, nullid)
1823 fparent1 = manifest1.get(fname, nullid)
1819 fparent2 = manifest2.get(fname, nullid)
1824 fparent2 = manifest2.get(fname, nullid)
1820 if isinstance(fctx, context.filectx):
1825 if isinstance(fctx, context.filectx):
1821 node = fctx.filenode()
1826 node = fctx.filenode()
1822 if node in [fparent1, fparent2]:
1827 if node in [fparent1, fparent2]:
1823 self.ui.debug('reusing %s filelog entry\n' % fname)
1828 self.ui.debug('reusing %s filelog entry\n' % fname)
1824 if manifest1.flags(fname) != fctx.flags():
1829 if manifest1.flags(fname) != fctx.flags():
1825 changelist.append(fname)
1830 changelist.append(fname)
1826 return node
1831 return node
1827
1832
1828 flog = self.file(fname)
1833 flog = self.file(fname)
1829 meta = {}
1834 meta = {}
1830 copy = fctx.renamed()
1835 copy = fctx.renamed()
1831 if copy and copy[0] != fname:
1836 if copy and copy[0] != fname:
1832 # Mark the new revision of this file as a copy of another
1837 # Mark the new revision of this file as a copy of another
1833 # file. This copy data will effectively act as a parent
1838 # file. This copy data will effectively act as a parent
1834 # of this new revision. If this is a merge, the first
1839 # of this new revision. If this is a merge, the first
1835 # parent will be the nullid (meaning "look up the copy data")
1840 # parent will be the nullid (meaning "look up the copy data")
1836 # and the second one will be the other parent. For example:
1841 # and the second one will be the other parent. For example:
1837 #
1842 #
1838 # 0 --- 1 --- 3 rev1 changes file foo
1843 # 0 --- 1 --- 3 rev1 changes file foo
1839 # \ / rev2 renames foo to bar and changes it
1844 # \ / rev2 renames foo to bar and changes it
1840 # \- 2 -/ rev3 should have bar with all changes and
1845 # \- 2 -/ rev3 should have bar with all changes and
1841 # should record that bar descends from
1846 # should record that bar descends from
1842 # bar in rev2 and foo in rev1
1847 # bar in rev2 and foo in rev1
1843 #
1848 #
1844 # this allows this merge to succeed:
1849 # this allows this merge to succeed:
1845 #
1850 #
1846 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1851 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1847 # \ / merging rev3 and rev4 should use bar@rev2
1852 # \ / merging rev3 and rev4 should use bar@rev2
1848 # \- 2 --- 4 as the merge base
1853 # \- 2 --- 4 as the merge base
1849 #
1854 #
1850
1855
1851 cfname = copy[0]
1856 cfname = copy[0]
1852 crev = manifest1.get(cfname)
1857 crev = manifest1.get(cfname)
1853 newfparent = fparent2
1858 newfparent = fparent2
1854
1859
1855 if manifest2: # branch merge
1860 if manifest2: # branch merge
1856 if fparent2 == nullid or crev is None: # copied on remote side
1861 if fparent2 == nullid or crev is None: # copied on remote side
1857 if cfname in manifest2:
1862 if cfname in manifest2:
1858 crev = manifest2[cfname]
1863 crev = manifest2[cfname]
1859 newfparent = fparent1
1864 newfparent = fparent1
1860
1865
1861 # Here, we used to search backwards through history to try to find
1866 # Here, we used to search backwards through history to try to find
1862 # where the file copy came from if the source of a copy was not in
1867 # where the file copy came from if the source of a copy was not in
1863 # the parent directory. However, this doesn't actually make sense to
1868 # the parent directory. However, this doesn't actually make sense to
1864 # do (what does a copy from something not in your working copy even
1869 # do (what does a copy from something not in your working copy even
1865 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1870 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1866 # the user that copy information was dropped, so if they didn't
1871 # the user that copy information was dropped, so if they didn't
1867 # expect this outcome it can be fixed, but this is the correct
1872 # expect this outcome it can be fixed, but this is the correct
1868 # behavior in this circumstance.
1873 # behavior in this circumstance.
1869
1874
1870 if crev:
1875 if crev:
1871 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1876 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1872 meta["copy"] = cfname
1877 meta["copy"] = cfname
1873 meta["copyrev"] = hex(crev)
1878 meta["copyrev"] = hex(crev)
1874 fparent1, fparent2 = nullid, newfparent
1879 fparent1, fparent2 = nullid, newfparent
1875 else:
1880 else:
1876 self.ui.warn(_("warning: can't find ancestor for '%s' "
1881 self.ui.warn(_("warning: can't find ancestor for '%s' "
1877 "copied from '%s'!\n") % (fname, cfname))
1882 "copied from '%s'!\n") % (fname, cfname))
1878
1883
1879 elif fparent1 == nullid:
1884 elif fparent1 == nullid:
1880 fparent1, fparent2 = fparent2, nullid
1885 fparent1, fparent2 = fparent2, nullid
1881 elif fparent2 != nullid:
1886 elif fparent2 != nullid:
1882 # is one parent an ancestor of the other?
1887 # is one parent an ancestor of the other?
1883 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1888 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1884 if fparent1 in fparentancestors:
1889 if fparent1 in fparentancestors:
1885 fparent1, fparent2 = fparent2, nullid
1890 fparent1, fparent2 = fparent2, nullid
1886 elif fparent2 in fparentancestors:
1891 elif fparent2 in fparentancestors:
1887 fparent2 = nullid
1892 fparent2 = nullid
1888
1893
1889 # is the file changed?
1894 # is the file changed?
1890 text = fctx.data()
1895 text = fctx.data()
1891 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1896 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1892 changelist.append(fname)
1897 changelist.append(fname)
1893 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1898 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1894 # are just the flags changed during merge?
1899 # are just the flags changed during merge?
1895 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1900 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1896 changelist.append(fname)
1901 changelist.append(fname)
1897
1902
1898 return fparent1
1903 return fparent1
1899
1904
def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
    """check for commit arguments that aren't committable

    Only runs for exact/prefix matchers (explicit files or patterns on
    the command line); calls ``fail(filename, message)`` for every
    explicitly named file that cannot be committed.
    """
    if not (match.isexact() or match.prefix()):
        return
    matched = set(status.modified + status.added + status.removed)

    for fname in match.files():
        fname = self.dirstate.normalize(fname)
        if fname == '.' or fname in matched or fname in wctx.substate:
            continue
        if fname in status.deleted:
            fail(fname, _('file not found!'))
        if fname in vdirs:  # visited directory
            prefix = fname + '/'
            # a named directory is fine as long as something under it
            # is being committed
            if not any(mf.startswith(prefix) for mf in matched):
                fail(fname, _("no match under directory!"))
        elif fname not in self.dirstate:
            fail(fname, _("file not tracked!"))
1920
1925
@unfilteredmethod
def commit(self, text="", user=None, date=None, match=None, force=False,
           editor=False, extra=None):
    """Add a new revision to current repository.

    Revision information is gathered from the working directory,
    match can be used to filter the committed files. If editor is
    supplied, it is called to get a commit message.

    Returns the new changeset node, or None when there was nothing to
    commit (and empty commits are not allowed).
    """
    if extra is None:
        extra = {}

    def fail(f, msg):
        raise error.Abort('%s: %s' % (f, msg))

    if not match:
        match = matchmod.always(self.root, '')

    if not force:
        # collect explicitly named directories so checkcommitpatterns
        # below can validate them; abort on bad patterns immediately
        vdirs = []
        match.explicitdir = vdirs.append
        match.bad = fail

    wlock = lock = tr = None
    try:
        wlock = self.wlock()
        lock = self.lock() # for recent changelog (see issue4368)

        wctx = self[None]
        merge = len(wctx.parents()) > 1

        if not force and merge and not match.always():
            raise error.Abort(_('cannot partially commit a merge '
                                '(do not specify files or patterns)'))

        status = self.status(match=match, clean=force)
        if force:
            status.modified.extend(status.clean) # mq may commit clean files

        # check subrepos
        subs, commitsubs, newstate = subrepoutil.precommit(
            self.ui, wctx, status, match, force=force)

        # make sure all explicit patterns are matched
        if not force:
            self.checkcommitpatterns(wctx, vdirs, match, status, fail)

        cctx = context.workingcommitctx(self, status,
                                        text, user, date, extra)

        # internal config: ui.allowemptycommit
        allowemptycommit = (wctx.branch() != wctx.p1().branch()
                            or extra.get('close') or merge or cctx.files()
                            or self.ui.configbool('ui', 'allowemptycommit'))
        if not allowemptycommit:
            return None

        if merge and cctx.deleted():
            raise error.Abort(_("cannot commit merge with missing files"))

        ms = mergemod.mergestate.read(self)
        mergeutil.checkunresolved(ms)

        if editor:
            cctx._text = editor(self, cctx, subs)
        edited = (text != cctx._text)

        # Save commit message in case this transaction gets rolled back
        # (e.g. by a pretxncommit hook).  Leave the content alone on
        # the assumption that the user will use the same editor again.
        msgfn = self.savecommitmessage(cctx._text)

        # commit subs and write new state
        if subs:
            for s in sorted(commitsubs):
                sub = wctx.sub(s)
                self.ui.status(_('committing subrepository %s\n') %
                               subrepoutil.subrelpath(sub))
                sr = sub.commit(cctx._text, user, date)
                newstate[s] = (newstate[s][0], sr)
            subrepoutil.writestate(self, newstate)

        p1, p2 = self.dirstate.parents()
        hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
        try:
            self.hook("precommit", throw=True, parent1=hookp1,
                      parent2=hookp2)
            tr = self.transaction('commit')
            ret = self.commitctx(cctx, True)
        except: # re-raises
            if edited:
                # point the user at the saved message before propagating
                self.ui.write(
                    _('note: commit message saved in %s\n') % msgfn)
            raise
        # update bookmarks, dirstate and mergestate
        bookmarks.update(self, [p1, p2], ret)
        cctx.markcommitted(ret)
        ms.reset()
        tr.close()

    finally:
        lockmod.release(tr, lock, wlock)

    def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
        # hack for command that use a temporary commit (eg: histedit)
        # temporary commit got stripped before hook release
        if self.changelog.hasnode(ret):
            self.hook("commit", node=node, parent1=parent1,
                      parent2=parent2)
    self._afterlock(commithook)
    return ret
2032
2037
@unfilteredmethod
def commitctx(self, ctx, error=False):
    """Add a new revision to current repository.
    Revision information is passed via the context argument.

    Returns the node of the new changeset.  When *error* is true,
    IOErrors while reading file contexts are warned about and re-raised
    instead of being treated as deletions.
    """

    tr = None
    p1, p2 = ctx.p1(), ctx.p2()
    user = ctx.user()

    lock = self.lock()
    try:
        tr = self.transaction("commit")
        trp = weakref.proxy(tr)

        if ctx.manifestnode():
            # reuse an existing manifest revision
            mn = ctx.manifestnode()
            files = ctx.files()
        elif ctx.files():
            m1ctx = p1.manifestctx()
            m2ctx = p2.manifestctx()
            mctx = m1ctx.copy()

            m = mctx.read()
            m1 = m1ctx.read()
            m2 = m2ctx.read()

            # check in files
            added = []
            changed = []
            removed = list(ctx.removed())
            linkrev = len(self)
            self.ui.note(_("committing files:\n"))
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    if fctx is None:
                        # a None filectx means the file went away
                        removed.append(f)
                    else:
                        added.append(f)
                        m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                trp, changed)
                        m.setflag(f, fctx.flags())
                except OSError as inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError as inst:
                    # ENOENT is tolerated (file vanished) unless the
                    # caller asked for strict error handling
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

            # update manifest
            self.ui.note(_("committing manifest\n"))
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m]
            for f in drop:
                del m[f]
            mn = mctx.write(trp, linkrev,
                            p1.manifestnode(), p2.manifestnode(),
                            added, drop)
            files = changed + removed
        else:
            # no file changes: reuse the first parent's manifest
            mn = p1.manifestnode()
            files = []

        # update changelog
        self.ui.note(_("committing changelog\n"))
        self.changelog.delayupdate(tr)
        n = self.changelog.add(mn, files, ctx.description(),
                               trp, p1.node(), p2.node(),
                               user, ctx.date(), ctx.extra().copy())
        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        # set the new commit is proper phase
        targetphase = subrepoutil.newcommitphase(self.ui, ctx)
        if targetphase:
            # retract boundary do not alter parent changeset.
            # if a parent have higher the resulting phase will
            # be compliant anyway
            #
            # if minimal phase was 0 we don't need to retract anything
            phases.registernew(self, tr, targetphase, [n])
        tr.close()
        return n
    finally:
        if tr:
            tr.release()
        lock.release()
2125
2130
@unfilteredmethod
def destroying(self):
    '''Inform the repository that nodes are about to be destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done before destroying history.

    This is mostly useful for saving state that is in memory and waiting
    to be flushed when the current lock is released. Because a call to
    destroyed is imminent, the repo will be invalidated causing those
    changes to stay in memory (waiting for the next unlock), or vanish
    completely.
    '''
    # When using the same lock to commit and strip, the phasecache is left
    # dirty after committing. Then when we strip, the repo is invalidated,
    # causing those changes to disappear.
    # vars(self) is used (not hasattr) so only an already-materialized
    # phasecache is flushed; we never force it to load here.
    if '_phasecache' in vars(self):
        self._phasecache.write()
2143
2148
@unfilteredmethod
def destroyed(self):
    '''Inform the repository that nodes have been destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done after destroying history.
    '''
    # When one tries to:
    # 1) destroy nodes thus calling this method (e.g. strip)
    # 2) use phasecache somewhere (e.g. commit)
    #
    # then 2) will fail because the phasecache contains nodes that were
    # removed. We can either remove phasecache from the filecache,
    # causing it to reload next time it is accessed, or simply filter
    # the removed nodes now and write the updated cache.
    self._phasecache.filterunknown(self)
    self._phasecache.write()

    # refresh all repository caches
    self.updatecaches()

    # Ensure the persistent tag cache is updated. Doing it now
    # means that the tag cache only has to worry about destroyed
    # heads immediately after a strip/rollback. That in turn
    # guarantees that "cachetip == currenttip" (comparing both rev
    # and node) always means no nodes have been added or destroyed.

    # XXX this is suboptimal when qrefresh'ing: we strip the current
    # head, refresh the tag cache, then immediately add a new head.
    # But I think doing it this way is necessary for the "instant
    # tag cache retrieval" case to work.
    self.invalidate()
2175
2180
def status(self, node1='.', node2=None, match=None,
           ignored=False, clean=False, unknown=False,
           listsubrepos=False):
    """Convenience wrapper: resolve *node1* to a context and return
    ``ctx.status(node2, ...)`` with the remaining flags forwarded."""
    ctx1 = self[node1]
    return ctx1.status(node2, match, ignored, clean, unknown,
                       listsubrepos)
2182
2187
def addpostdsstatus(self, ps):
    """Register a callback to run within the wlock, at the point at which
    status fixups happen.

    On status completion, callback(wctx, status) will be called with the
    wlock held, unless the dirstate has changed from underneath or the
    wlock couldn't be grabbed.

    Callbacks should not capture and use a cached copy of the dirstate --
    it might change in the meanwhile.  Instead, they should access the
    dirstate via wctx.repo().dirstate.

    The list is emptied out after each status run -- extensions should
    make sure to add to it each time dirstate.status is called, and
    should not call this for statuses that don't involve the dirstate.
    """
    # The list lives on the repo (not the workingctx) because the
    # workingctx isn't unique per-repo.
    self._postdsstatus.append(ps)
2204
2209
def postdsstatus(self):
    """Used by workingctx to fetch the registered post-dirstate-status
    hooks (the live list, not a copy)."""
    callbacks = self._postdsstatus
    return callbacks
2208
2213
def clearpostdsstatus(self):
    """Used by workingctx to drop all post-dirstate-status hooks."""
    # clear in place -- other code may hold a reference to the list
    self._postdsstatus[:] = []
2212
2217
def heads(self, start=None):
    """Return repository head nodes.

    With no *start*, all changelog heads are returned in reverse of
    changelog.headrevs() order; otherwise only heads reachable from
    *start*, sorted by revision number descending.
    """
    cl = self.changelog
    if start is None:
        return [cl.node(rev) for rev in reversed(cl.headrevs())]
    # sort the output in rev descending order
    return sorted(cl.heads(start), key=cl.rev, reverse=True)
2222
2227
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    name = branch if branch is not None else self[None].branch()
    bmap = self.branchmap()
    if name not in bmap:
        return []
    # the branch cache stores heads ordered lowest to highest; callers
    # want newest first
    result = list(reversed(bmap.branchheads(name, closed=closed)))
    if start is not None:
        # filter out the heads that cannot be reached from startrev
        reachable = set(self.changelog.nodesbetween([start], result)[2])
        result = [h for h in result if h in reachable]
    return result
2243
2248
def branches(self, nodes):
    """For each node (defaulting to the changelog tip), follow first
    parents back until a merge (p2 set) or a root (p1 null) and return
    (start, stopnode, p1, p2) tuples."""
    if not nodes:
        nodes = [self.changelog.tip()]
    result = []
    for head in nodes:
        node = head
        while True:
            parents = self.changelog.parents(node)
            # stop at a merge changeset or at a root
            if parents[1] != nullid or parents[0] == nullid:
                result.append((head, node, parents[0], parents[1]))
                break
            node = parents[0]
    return result
2257
2262
def between(self, pairs):
    """For each (top, bottom) pair, walk first-parent history from top
    toward bottom and sample the nodes at exponentially growing
    distances (1, 2, 4, ...).  Returns one list of samples per pair."""
    result = []

    for top, bottom in pairs:
        node, sample = top, []
        dist, nextsample = 0, 1

        while node != bottom and node != nullid:
            parent = self.changelog.parents(node)[0]
            if dist == nextsample:
                sample.append(node)
                nextsample *= 2
            node = parent
            dist += 1

        result.append(sample)

    return result
2276
2281
def checkpush(self, pushop):
    """Hook point run before pushing; the default does nothing.

    Extensions can override this function if additional checks have
    to be performed before pushing, or call it if they override the
    push command.
    """
2282
2287
@unfilteredpropertycache
def prepushoutgoinghooks(self):
    """Return util.hooks consists of a pushop with repo, remote, outgoing
    methods, which are called before pushing changesets.
    """
    hookcontainer = util.hooks()
    return hookcontainer
2289
2294
def pushkey(self, namespace, key, old, new):
    """Set *key* to *new* (expecting current value *old*) in the pushkey
    *namespace*, running the prepushkey and pushkey hooks around it.

    Returns the pushkey backend's result, or False when a prepushkey
    hook aborted the operation.
    """
    try:
        # run the prepushkey hook with the in-progress transaction's
        # hook arguments (if any) plus our own key/value details
        tr = self.currenttransaction()
        hookargs = {}
        if tr is not None:
            hookargs.update(tr.hookargs)
        hookargs = pycompat.strkwargs(hookargs)
        hookargs[r'namespace'] = namespace
        hookargs[r'key'] = key
        hookargs[r'old'] = old
        hookargs[r'new'] = new
        self.hook('prepushkey', throw=True, **hookargs)
    except error.HookAbort as exc:
        # report the refusal to the user instead of propagating
        self.ui.write_err(_("pushkey-abort: %s\n") % exc)
        if exc.hint:
            self.ui.write_err(_("(%s)\n") % exc.hint)
        return False
    self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
    ret = pushkey.push(self, namespace, key, old, new)
    def runhook():
        # the post-change hook runs once the lock is released
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
    self._afterlock(runhook)
    return ret
2314
2319
def listkeys(self, namespace):
    """Run the prelistkeys hook, list the pushkeys in *namespace*, fire
    the listkeys hook with the result, and return it."""
    self.hook('prelistkeys', throw=True, namespace=namespace)
    self.ui.debug('listing keys for "%s"\n' % namespace)
    result = pushkey.list(self, namespace)
    self.hook('listkeys', namespace=namespace, values=result)
    return result
2321
2326
def debugwireargs(self, one, two, three=None, four=None, five=None):
    '''used to test argument passing over the wire'''
    # the optional arguments may be None; stringify them for the echo
    tail = [pycompat.bytestr(arg) for arg in (three, four, five)]
    return "%s %s %s %s %s" % (one, two, tail[0], tail[1], tail[2])
2327
2332
def savecommitmessage(self, text):
    """Persist *text* as 'last-message.txt' via self.vfs and return its
    user-displayable path (relative to the repository root)."""
    msgfile = self.vfs('last-message.txt', 'wb')
    try:
        msgfile.write(text)
    finally:
        msgfile.close()
    relpath = msgfile.name[len(self.root) + 1:]
    return self.pathto(relpath)
2335
2340
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the (vfs, src, dest) renames in
    *files*; snapshots the list eagerly so later mutation of *files*
    has no effect."""
    renamefiles = [tuple(entry) for entry in files]
    def renamer():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renamer
2350
2355
def undoname(fn):
    """Translate a journal file path into its undo counterpart.

    Only the first 'journal' occurrence in the basename is rewritten,
    e.g. 'store/journal.phaseroots' -> 'store/undo.phaseroots'.  The
    basename is required to start with 'journal'.
    """
    directory, basename = os.path.split(fn)
    assert basename.startswith('journal')
    return os.path.join(directory, basename.replace('journal', 'undo', 1))
2355
2360
def instance(ui, path, create, intents=None):
    """Open (or create) the local repository at *path*.

    *path* may be a 'file:'-style URL; it is reduced to a plain
    filesystem path before the repository object is constructed.
    """
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create, intents=intents)
2359
2364
def islocal(path):
    """A localrepository is, by definition, always local."""
    return True
2362
2367
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}

    # boolean [format] knobs that map one-to-one onto a requirement
    for knob, requirement in [('usestore', 'store'),
                              ('usefncache', 'fncache'),
                              ('dotencode', 'dotencode')]:
        if ui.configbool('format', knob):
            requirements.add(requirement)

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        # revlogv2 supersedes revlogv1 and implies generaldelta
        requirements.remove('revlogv1')
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
@@ -1,1296 +1,1296 b''
1 #testcases sshv1 sshv2
1 #testcases sshv1 sshv2
2
2
3 #if sshv2
3 #if sshv2
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [experimental]
5 > [experimental]
6 > sshpeer.advertise-v2 = true
6 > sshpeer.advertise-v2 = true
7 > sshserver.support-v2 = true
7 > sshserver.support-v2 = true
8 > EOF
8 > EOF
9 #endif
9 #endif
10
10
11 Prepare repo a:
11 Prepare repo a:
12
12
13 $ hg init a
13 $ hg init a
14 $ cd a
14 $ cd a
15 $ echo a > a
15 $ echo a > a
16 $ hg add a
16 $ hg add a
17 $ hg commit -m test
17 $ hg commit -m test
18 $ echo first line > b
18 $ echo first line > b
19 $ hg add b
19 $ hg add b
20
20
21 Create a non-inlined filelog:
21 Create a non-inlined filelog:
22
22
23 $ $PYTHON -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
23 $ $PYTHON -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
25 > cat data1 >> b
25 > cat data1 >> b
26 > hg commit -m test
26 > hg commit -m test
27 > done
27 > done
28
28
29 List files in store/data (should show a 'b.d'):
29 List files in store/data (should show a 'b.d'):
30
30
31 #if reporevlogstore
31 #if reporevlogstore
32 $ for i in .hg/store/data/*; do
32 $ for i in .hg/store/data/*; do
33 > echo $i
33 > echo $i
34 > done
34 > done
35 .hg/store/data/a.i
35 .hg/store/data/a.i
36 .hg/store/data/b.d
36 .hg/store/data/b.d
37 .hg/store/data/b.i
37 .hg/store/data/b.i
38 #endif
38 #endif
39
39
40 Trigger branchcache creation:
40 Trigger branchcache creation:
41
41
42 $ hg branches
42 $ hg branches
43 default 10:a7949464abda
43 default 10:a7949464abda
44 $ ls .hg/cache
44 $ ls .hg/cache
45 branch2-served
45 branch2-served
46 checkisexec (execbit !)
46 checkisexec (execbit !)
47 checklink (symlink !)
47 checklink (symlink !)
48 checklink-target (symlink !)
48 checklink-target (symlink !)
49 checknoexec (execbit !)
49 checknoexec (execbit !)
50 manifestfulltextcache
50 manifestfulltextcache
51 rbc-names-v1
51 rbc-names-v1
52 rbc-revs-v1
52 rbc-revs-v1
53
53
54 Default operation:
54 Default operation:
55
55
56 $ hg clone . ../b
56 $ hg clone . ../b
57 updating to branch default
57 updating to branch default
58 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
58 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
59 $ cd ../b
59 $ cd ../b
60
60
61 Ensure branchcache got copied over:
61 Ensure branchcache got copied over:
62
62
63 $ ls .hg/cache
63 $ ls .hg/cache
64 branch2-served
64 branch2-served
65 checkisexec (execbit !)
65 checkisexec (execbit !)
66 checklink (symlink !)
66 checklink (symlink !)
67 checklink-target (symlink !)
67 checklink-target (symlink !)
68 rbc-names-v1
68 rbc-names-v1
69 rbc-revs-v1
69 rbc-revs-v1
70
70
71 $ cat a
71 $ cat a
72 a
72 a
73 $ hg verify
73 $ hg verify
74 checking changesets
74 checking changesets
75 checking manifests
75 checking manifests
76 crosschecking files in changesets and manifests
76 crosschecking files in changesets and manifests
77 checking files
77 checking files
78 2 files, 11 changesets, 11 total revisions
78 2 files, 11 changesets, 11 total revisions
79
79
80 Invalid dest '' must abort:
80 Invalid dest '' must abort:
81
81
82 $ hg clone . ''
82 $ hg clone . ''
83 abort: empty destination path is not valid
83 abort: empty destination path is not valid
84 [255]
84 [255]
85
85
86 No update, with debug option:
86 No update, with debug option:
87
87
88 #if hardlink
88 #if hardlink
89 $ hg --debug clone -U . ../c --config progress.debug=true
89 $ hg --debug clone -U . ../c --config progress.debug=true
90 linking: 1
90 linking: 1
91 linking: 2
91 linking: 2
92 linking: 3
92 linking: 3
93 linking: 4
93 linking: 4
94 linking: 5
94 linking: 5
95 linking: 6
95 linking: 6
96 linking: 7
96 linking: 7
97 linking: 8
97 linking: 8
98 linked 8 files (reporevlogstore !)
98 linked 8 files (reporevlogstore !)
99 linking: 9 (reposimplestore !)
99 linking: 9 (reposimplestore !)
100 linking: 10 (reposimplestore !)
100 linking: 10 (reposimplestore !)
101 linking: 11 (reposimplestore !)
101 linking: 11 (reposimplestore !)
102 linking: 12 (reposimplestore !)
102 linking: 12 (reposimplestore !)
103 linking: 13 (reposimplestore !)
103 linking: 13 (reposimplestore !)
104 linking: 14 (reposimplestore !)
104 linking: 14 (reposimplestore !)
105 linking: 15 (reposimplestore !)
105 linking: 15 (reposimplestore !)
106 linking: 16 (reposimplestore !)
106 linking: 16 (reposimplestore !)
107 linking: 17 (reposimplestore !)
107 linking: 17 (reposimplestore !)
108 linking: 18 (reposimplestore !)
108 linking: 18 (reposimplestore !)
109 linked 18 files (reposimplestore !)
109 linked 18 files (reposimplestore !)
110 #else
110 #else
111 $ hg --debug clone -U . ../c --config progress.debug=true
111 $ hg --debug clone -U . ../c --config progress.debug=true
112 linking: 1
112 linking: 1
113 copying: 2
113 copying: 2
114 copying: 3
114 copying: 3
115 copying: 4
115 copying: 4
116 copying: 5
116 copying: 5
117 copying: 6
117 copying: 6
118 copying: 7
118 copying: 7
119 copying: 8
119 copying: 8
120 copied 8 files (reporevlogstore !)
120 copied 8 files (reporevlogstore !)
121 copying: 9 (reposimplestore !)
121 copying: 9 (reposimplestore !)
122 copying: 10 (reposimplestore !)
122 copying: 10 (reposimplestore !)
123 copying: 11 (reposimplestore !)
123 copying: 11 (reposimplestore !)
124 copying: 12 (reposimplestore !)
124 copying: 12 (reposimplestore !)
125 copying: 13 (reposimplestore !)
125 copying: 13 (reposimplestore !)
126 copying: 14 (reposimplestore !)
126 copying: 14 (reposimplestore !)
127 copying: 15 (reposimplestore !)
127 copying: 15 (reposimplestore !)
128 copying: 16 (reposimplestore !)
128 copying: 16 (reposimplestore !)
129 copying: 17 (reposimplestore !)
129 copying: 17 (reposimplestore !)
130 copying: 18 (reposimplestore !)
130 copying: 18 (reposimplestore !)
131 copied 18 files (reposimplestore !)
131 copied 18 files (reposimplestore !)
132 #endif
132 #endif
133 $ cd ../c
133 $ cd ../c
134
134
135 Ensure branchcache got copied over:
135 Ensure branchcache got copied over:
136
136
137 $ ls .hg/cache
137 $ ls .hg/cache
138 branch2-served
138 branch2-served
139 rbc-names-v1
139 rbc-names-v1
140 rbc-revs-v1
140 rbc-revs-v1
141
141
142 $ cat a 2>/dev/null || echo "a not present"
142 $ cat a 2>/dev/null || echo "a not present"
143 a not present
143 a not present
144 $ hg verify
144 $ hg verify
145 checking changesets
145 checking changesets
146 checking manifests
146 checking manifests
147 crosschecking files in changesets and manifests
147 crosschecking files in changesets and manifests
148 checking files
148 checking files
149 2 files, 11 changesets, 11 total revisions
149 2 files, 11 changesets, 11 total revisions
150
150
151 Default destination:
151 Default destination:
152
152
153 $ mkdir ../d
153 $ mkdir ../d
154 $ cd ../d
154 $ cd ../d
155 $ hg clone ../a
155 $ hg clone ../a
156 destination directory: a
156 destination directory: a
157 updating to branch default
157 updating to branch default
158 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
158 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
159 $ cd a
159 $ cd a
160 $ hg cat a
160 $ hg cat a
161 a
161 a
162 $ cd ../..
162 $ cd ../..
163
163
164 Check that we drop the 'file:' from the path before writing the .hgrc:
164 Check that we drop the 'file:' from the path before writing the .hgrc:
165
165
166 $ hg clone file:a e
166 $ hg clone file:a e
167 updating to branch default
167 updating to branch default
168 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
168 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
169 $ grep 'file:' e/.hg/hgrc
169 $ grep 'file:' e/.hg/hgrc
170 [1]
170 [1]
171
171
172 Check that path aliases are expanded:
172 Check that path aliases are expanded:
173
173
174 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
174 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
175 $ hg -R f showconfig paths.default
175 $ hg -R f showconfig paths.default
176 $TESTTMP/a#0
176 $TESTTMP/a#0
177
177
178 Use --pull:
178 Use --pull:
179
179
180 $ hg clone --pull a g
180 $ hg clone --pull a g
181 requesting all changes
181 requesting all changes
182 adding changesets
182 adding changesets
183 adding manifests
183 adding manifests
184 adding file changes
184 adding file changes
185 added 11 changesets with 11 changes to 2 files
185 added 11 changesets with 11 changes to 2 files
186 new changesets acb14030fe0a:a7949464abda
186 new changesets acb14030fe0a:a7949464abda
187 updating to branch default
187 updating to branch default
188 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
188 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
189 $ hg -R g verify
189 $ hg -R g verify
190 checking changesets
190 checking changesets
191 checking manifests
191 checking manifests
192 crosschecking files in changesets and manifests
192 crosschecking files in changesets and manifests
193 checking files
193 checking files
194 2 files, 11 changesets, 11 total revisions
194 2 files, 11 changesets, 11 total revisions
195
195
196 Invalid dest '' with --pull must abort (issue2528):
196 Invalid dest '' with --pull must abort (issue2528):
197
197
198 $ hg clone --pull a ''
198 $ hg clone --pull a ''
199 abort: empty destination path is not valid
199 abort: empty destination path is not valid
200 [255]
200 [255]
201
201
202 Clone to '.':
202 Clone to '.':
203
203
204 $ mkdir h
204 $ mkdir h
205 $ cd h
205 $ cd h
206 $ hg clone ../a .
206 $ hg clone ../a .
207 updating to branch default
207 updating to branch default
208 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
208 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
209 $ cd ..
209 $ cd ..
210
210
211
211
212 *** Tests for option -u ***
212 *** Tests for option -u ***
213
213
214 Adding some more history to repo a:
214 Adding some more history to repo a:
215
215
216 $ cd a
216 $ cd a
217 $ hg tag ref1
217 $ hg tag ref1
218 $ echo the quick brown fox >a
218 $ echo the quick brown fox >a
219 $ hg ci -m "hacked default"
219 $ hg ci -m "hacked default"
220 $ hg up ref1
220 $ hg up ref1
221 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
221 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
222 $ hg branch stable
222 $ hg branch stable
223 marked working directory as branch stable
223 marked working directory as branch stable
224 (branches are permanent and global, did you want a bookmark?)
224 (branches are permanent and global, did you want a bookmark?)
225 $ echo some text >a
225 $ echo some text >a
226 $ hg ci -m "starting branch stable"
226 $ hg ci -m "starting branch stable"
227 $ hg tag ref2
227 $ hg tag ref2
228 $ echo some more text >a
228 $ echo some more text >a
229 $ hg ci -m "another change for branch stable"
229 $ hg ci -m "another change for branch stable"
230 $ hg up ref2
230 $ hg up ref2
231 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
231 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
232 $ hg parents
232 $ hg parents
233 changeset: 13:e8ece76546a6
233 changeset: 13:e8ece76546a6
234 branch: stable
234 branch: stable
235 tag: ref2
235 tag: ref2
236 parent: 10:a7949464abda
236 parent: 10:a7949464abda
237 user: test
237 user: test
238 date: Thu Jan 01 00:00:00 1970 +0000
238 date: Thu Jan 01 00:00:00 1970 +0000
239 summary: starting branch stable
239 summary: starting branch stable
240
240
241
241
242 Repo a has two heads:
242 Repo a has two heads:
243
243
244 $ hg heads
244 $ hg heads
245 changeset: 15:0aae7cf88f0d
245 changeset: 15:0aae7cf88f0d
246 branch: stable
246 branch: stable
247 tag: tip
247 tag: tip
248 user: test
248 user: test
249 date: Thu Jan 01 00:00:00 1970 +0000
249 date: Thu Jan 01 00:00:00 1970 +0000
250 summary: another change for branch stable
250 summary: another change for branch stable
251
251
252 changeset: 12:f21241060d6a
252 changeset: 12:f21241060d6a
253 user: test
253 user: test
254 date: Thu Jan 01 00:00:00 1970 +0000
254 date: Thu Jan 01 00:00:00 1970 +0000
255 summary: hacked default
255 summary: hacked default
256
256
257
257
258 $ cd ..
258 $ cd ..
259
259
260
260
261 Testing --noupdate with --updaterev (must abort):
261 Testing --noupdate with --updaterev (must abort):
262
262
263 $ hg clone --noupdate --updaterev 1 a ua
263 $ hg clone --noupdate --updaterev 1 a ua
264 abort: cannot specify both --noupdate and --updaterev
264 abort: cannot specify both --noupdate and --updaterev
265 [255]
265 [255]
266
266
267
267
268 Testing clone -u:
268 Testing clone -u:
269
269
270 $ hg clone -u . a ua
270 $ hg clone -u . a ua
271 updating to branch stable
271 updating to branch stable
272 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
272 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
273
273
274 Repo ua has both heads:
274 Repo ua has both heads:
275
275
276 $ hg -R ua heads
276 $ hg -R ua heads
277 changeset: 15:0aae7cf88f0d
277 changeset: 15:0aae7cf88f0d
278 branch: stable
278 branch: stable
279 tag: tip
279 tag: tip
280 user: test
280 user: test
281 date: Thu Jan 01 00:00:00 1970 +0000
281 date: Thu Jan 01 00:00:00 1970 +0000
282 summary: another change for branch stable
282 summary: another change for branch stable
283
283
284 changeset: 12:f21241060d6a
284 changeset: 12:f21241060d6a
285 user: test
285 user: test
286 date: Thu Jan 01 00:00:00 1970 +0000
286 date: Thu Jan 01 00:00:00 1970 +0000
287 summary: hacked default
287 summary: hacked default
288
288
289
289
290 Same revision checked out in repo a and ua:
290 Same revision checked out in repo a and ua:
291
291
292 $ hg -R a parents --template "{node|short}\n"
292 $ hg -R a parents --template "{node|short}\n"
293 e8ece76546a6
293 e8ece76546a6
294 $ hg -R ua parents --template "{node|short}\n"
294 $ hg -R ua parents --template "{node|short}\n"
295 e8ece76546a6
295 e8ece76546a6
296
296
297 $ rm -r ua
297 $ rm -r ua
298
298
299
299
300 Testing clone --pull -u:
300 Testing clone --pull -u:
301
301
302 $ hg clone --pull -u . a ua
302 $ hg clone --pull -u . a ua
303 requesting all changes
303 requesting all changes
304 adding changesets
304 adding changesets
305 adding manifests
305 adding manifests
306 adding file changes
306 adding file changes
307 added 16 changesets with 16 changes to 3 files (+1 heads)
307 added 16 changesets with 16 changes to 3 files (+1 heads)
308 new changesets acb14030fe0a:0aae7cf88f0d
308 new changesets acb14030fe0a:0aae7cf88f0d
309 updating to branch stable
309 updating to branch stable
310 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
310 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
311
311
312 Repo ua has both heads:
312 Repo ua has both heads:
313
313
314 $ hg -R ua heads
314 $ hg -R ua heads
315 changeset: 15:0aae7cf88f0d
315 changeset: 15:0aae7cf88f0d
316 branch: stable
316 branch: stable
317 tag: tip
317 tag: tip
318 user: test
318 user: test
319 date: Thu Jan 01 00:00:00 1970 +0000
319 date: Thu Jan 01 00:00:00 1970 +0000
320 summary: another change for branch stable
320 summary: another change for branch stable
321
321
322 changeset: 12:f21241060d6a
322 changeset: 12:f21241060d6a
323 user: test
323 user: test
324 date: Thu Jan 01 00:00:00 1970 +0000
324 date: Thu Jan 01 00:00:00 1970 +0000
325 summary: hacked default
325 summary: hacked default
326
326
327
327
328 Same revision checked out in repo a and ua:
328 Same revision checked out in repo a and ua:
329
329
330 $ hg -R a parents --template "{node|short}\n"
330 $ hg -R a parents --template "{node|short}\n"
331 e8ece76546a6
331 e8ece76546a6
332 $ hg -R ua parents --template "{node|short}\n"
332 $ hg -R ua parents --template "{node|short}\n"
333 e8ece76546a6
333 e8ece76546a6
334
334
335 $ rm -r ua
335 $ rm -r ua
336
336
337
337
338 Testing clone -u <branch>:
338 Testing clone -u <branch>:
339
339
340 $ hg clone -u stable a ua
340 $ hg clone -u stable a ua
341 updating to branch stable
341 updating to branch stable
342 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
342 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
343
343
344 Repo ua has both heads:
344 Repo ua has both heads:
345
345
346 $ hg -R ua heads
346 $ hg -R ua heads
347 changeset: 15:0aae7cf88f0d
347 changeset: 15:0aae7cf88f0d
348 branch: stable
348 branch: stable
349 tag: tip
349 tag: tip
350 user: test
350 user: test
351 date: Thu Jan 01 00:00:00 1970 +0000
351 date: Thu Jan 01 00:00:00 1970 +0000
352 summary: another change for branch stable
352 summary: another change for branch stable
353
353
354 changeset: 12:f21241060d6a
354 changeset: 12:f21241060d6a
355 user: test
355 user: test
356 date: Thu Jan 01 00:00:00 1970 +0000
356 date: Thu Jan 01 00:00:00 1970 +0000
357 summary: hacked default
357 summary: hacked default
358
358
359
359
360 Branch 'stable' is checked out:
360 Branch 'stable' is checked out:
361
361
362 $ hg -R ua parents
362 $ hg -R ua parents
363 changeset: 15:0aae7cf88f0d
363 changeset: 15:0aae7cf88f0d
364 branch: stable
364 branch: stable
365 tag: tip
365 tag: tip
366 user: test
366 user: test
367 date: Thu Jan 01 00:00:00 1970 +0000
367 date: Thu Jan 01 00:00:00 1970 +0000
368 summary: another change for branch stable
368 summary: another change for branch stable
369
369
370
370
371 $ rm -r ua
371 $ rm -r ua
372
372
373
373
374 Testing default checkout:
374 Testing default checkout:
375
375
376 $ hg clone a ua
376 $ hg clone a ua
377 updating to branch default
377 updating to branch default
378 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
378 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
379
379
380 Repo ua has both heads:
380 Repo ua has both heads:
381
381
382 $ hg -R ua heads
382 $ hg -R ua heads
383 changeset: 15:0aae7cf88f0d
383 changeset: 15:0aae7cf88f0d
384 branch: stable
384 branch: stable
385 tag: tip
385 tag: tip
386 user: test
386 user: test
387 date: Thu Jan 01 00:00:00 1970 +0000
387 date: Thu Jan 01 00:00:00 1970 +0000
388 summary: another change for branch stable
388 summary: another change for branch stable
389
389
390 changeset: 12:f21241060d6a
390 changeset: 12:f21241060d6a
391 user: test
391 user: test
392 date: Thu Jan 01 00:00:00 1970 +0000
392 date: Thu Jan 01 00:00:00 1970 +0000
393 summary: hacked default
393 summary: hacked default
394
394
395
395
396 Branch 'default' is checked out:
396 Branch 'default' is checked out:
397
397
398 $ hg -R ua parents
398 $ hg -R ua parents
399 changeset: 12:f21241060d6a
399 changeset: 12:f21241060d6a
400 user: test
400 user: test
401 date: Thu Jan 01 00:00:00 1970 +0000
401 date: Thu Jan 01 00:00:00 1970 +0000
402 summary: hacked default
402 summary: hacked default
403
403
404 Test clone with a branch named "@" (issue3677)
404 Test clone with a branch named "@" (issue3677)
405
405
406 $ hg -R ua branch @
406 $ hg -R ua branch @
407 marked working directory as branch @
407 marked working directory as branch @
408 $ hg -R ua commit -m 'created branch @'
408 $ hg -R ua commit -m 'created branch @'
409 $ hg clone ua atbranch
409 $ hg clone ua atbranch
410 updating to branch default
410 updating to branch default
411 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
411 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
412 $ hg -R atbranch heads
412 $ hg -R atbranch heads
413 changeset: 16:798b6d97153e
413 changeset: 16:798b6d97153e
414 branch: @
414 branch: @
415 tag: tip
415 tag: tip
416 parent: 12:f21241060d6a
416 parent: 12:f21241060d6a
417 user: test
417 user: test
418 date: Thu Jan 01 00:00:00 1970 +0000
418 date: Thu Jan 01 00:00:00 1970 +0000
419 summary: created branch @
419 summary: created branch @
420
420
421 changeset: 15:0aae7cf88f0d
421 changeset: 15:0aae7cf88f0d
422 branch: stable
422 branch: stable
423 user: test
423 user: test
424 date: Thu Jan 01 00:00:00 1970 +0000
424 date: Thu Jan 01 00:00:00 1970 +0000
425 summary: another change for branch stable
425 summary: another change for branch stable
426
426
427 changeset: 12:f21241060d6a
427 changeset: 12:f21241060d6a
428 user: test
428 user: test
429 date: Thu Jan 01 00:00:00 1970 +0000
429 date: Thu Jan 01 00:00:00 1970 +0000
430 summary: hacked default
430 summary: hacked default
431
431
432 $ hg -R atbranch parents
432 $ hg -R atbranch parents
433 changeset: 12:f21241060d6a
433 changeset: 12:f21241060d6a
434 user: test
434 user: test
435 date: Thu Jan 01 00:00:00 1970 +0000
435 date: Thu Jan 01 00:00:00 1970 +0000
436 summary: hacked default
436 summary: hacked default
437
437
438
438
439 $ rm -r ua atbranch
439 $ rm -r ua atbranch
440
440
441
441
442 Testing #<branch>:
442 Testing #<branch>:
443
443
444 $ hg clone -u . a#stable ua
444 $ hg clone -u . a#stable ua
445 adding changesets
445 adding changesets
446 adding manifests
446 adding manifests
447 adding file changes
447 adding file changes
448 added 14 changesets with 14 changes to 3 files
448 added 14 changesets with 14 changes to 3 files
449 new changesets acb14030fe0a:0aae7cf88f0d
449 new changesets acb14030fe0a:0aae7cf88f0d
450 updating to branch stable
450 updating to branch stable
451 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
451 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
452
452
453 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
453 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
454
454
455 $ hg -R ua heads
455 $ hg -R ua heads
456 changeset: 13:0aae7cf88f0d
456 changeset: 13:0aae7cf88f0d
457 branch: stable
457 branch: stable
458 tag: tip
458 tag: tip
459 user: test
459 user: test
460 date: Thu Jan 01 00:00:00 1970 +0000
460 date: Thu Jan 01 00:00:00 1970 +0000
461 summary: another change for branch stable
461 summary: another change for branch stable
462
462
463 changeset: 10:a7949464abda
463 changeset: 10:a7949464abda
464 user: test
464 user: test
465 date: Thu Jan 01 00:00:00 1970 +0000
465 date: Thu Jan 01 00:00:00 1970 +0000
466 summary: test
466 summary: test
467
467
468
468
469 Same revision checked out in repo a and ua:
469 Same revision checked out in repo a and ua:
470
470
471 $ hg -R a parents --template "{node|short}\n"
471 $ hg -R a parents --template "{node|short}\n"
472 e8ece76546a6
472 e8ece76546a6
473 $ hg -R ua parents --template "{node|short}\n"
473 $ hg -R ua parents --template "{node|short}\n"
474 e8ece76546a6
474 e8ece76546a6
475
475
476 $ rm -r ua
476 $ rm -r ua
477
477
478
478
479 Testing -u -r <branch>:
479 Testing -u -r <branch>:
480
480
481 $ hg clone -u . -r stable a ua
481 $ hg clone -u . -r stable a ua
482 adding changesets
482 adding changesets
483 adding manifests
483 adding manifests
484 adding file changes
484 adding file changes
485 added 14 changesets with 14 changes to 3 files
485 added 14 changesets with 14 changes to 3 files
486 new changesets acb14030fe0a:0aae7cf88f0d
486 new changesets acb14030fe0a:0aae7cf88f0d
487 updating to branch stable
487 updating to branch stable
488 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
488 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
489
489
490 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
490 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
491
491
492 $ hg -R ua heads
492 $ hg -R ua heads
493 changeset: 13:0aae7cf88f0d
493 changeset: 13:0aae7cf88f0d
494 branch: stable
494 branch: stable
495 tag: tip
495 tag: tip
496 user: test
496 user: test
497 date: Thu Jan 01 00:00:00 1970 +0000
497 date: Thu Jan 01 00:00:00 1970 +0000
498 summary: another change for branch stable
498 summary: another change for branch stable
499
499
500 changeset: 10:a7949464abda
500 changeset: 10:a7949464abda
501 user: test
501 user: test
502 date: Thu Jan 01 00:00:00 1970 +0000
502 date: Thu Jan 01 00:00:00 1970 +0000
503 summary: test
503 summary: test
504
504
505
505
506 Same revision checked out in repo a and ua:
506 Same revision checked out in repo a and ua:
507
507
508 $ hg -R a parents --template "{node|short}\n"
508 $ hg -R a parents --template "{node|short}\n"
509 e8ece76546a6
509 e8ece76546a6
510 $ hg -R ua parents --template "{node|short}\n"
510 $ hg -R ua parents --template "{node|short}\n"
511 e8ece76546a6
511 e8ece76546a6
512
512
513 $ rm -r ua
513 $ rm -r ua
514
514
515
515
516 Testing -r <branch>:
516 Testing -r <branch>:
517
517
518 $ hg clone -r stable a ua
518 $ hg clone -r stable a ua
519 adding changesets
519 adding changesets
520 adding manifests
520 adding manifests
521 adding file changes
521 adding file changes
522 added 14 changesets with 14 changes to 3 files
522 added 14 changesets with 14 changes to 3 files
523 new changesets acb14030fe0a:0aae7cf88f0d
523 new changesets acb14030fe0a:0aae7cf88f0d
524 updating to branch stable
524 updating to branch stable
525 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
525 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
526
526
527 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
527 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
528
528
529 $ hg -R ua heads
529 $ hg -R ua heads
530 changeset: 13:0aae7cf88f0d
530 changeset: 13:0aae7cf88f0d
531 branch: stable
531 branch: stable
532 tag: tip
532 tag: tip
533 user: test
533 user: test
534 date: Thu Jan 01 00:00:00 1970 +0000
534 date: Thu Jan 01 00:00:00 1970 +0000
535 summary: another change for branch stable
535 summary: another change for branch stable
536
536
537 changeset: 10:a7949464abda
537 changeset: 10:a7949464abda
538 user: test
538 user: test
539 date: Thu Jan 01 00:00:00 1970 +0000
539 date: Thu Jan 01 00:00:00 1970 +0000
540 summary: test
540 summary: test
541
541
542
542
543 Branch 'stable' is checked out:
543 Branch 'stable' is checked out:
544
544
545 $ hg -R ua parents
545 $ hg -R ua parents
546 changeset: 13:0aae7cf88f0d
546 changeset: 13:0aae7cf88f0d
547 branch: stable
547 branch: stable
548 tag: tip
548 tag: tip
549 user: test
549 user: test
550 date: Thu Jan 01 00:00:00 1970 +0000
550 date: Thu Jan 01 00:00:00 1970 +0000
551 summary: another change for branch stable
551 summary: another change for branch stable
552
552
553
553
554 $ rm -r ua
554 $ rm -r ua
555
555
556
556
557 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
557 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
558 iterable in addbranchrevs()
558 iterable in addbranchrevs()
559
559
560 $ cat <<EOF > simpleclone.py
560 $ cat <<EOF > simpleclone.py
561 > from mercurial import ui, hg
561 > from mercurial import ui, hg
562 > myui = ui.ui.load()
562 > myui = ui.ui.load()
563 > repo = hg.repository(myui, b'a')
563 > repo = hg.repository(myui, b'a')
564 > hg.clone(myui, {}, repo, dest=b"ua")
564 > hg.clone(myui, {}, repo, dest=b"ua")
565 > EOF
565 > EOF
566
566
567 $ $PYTHON simpleclone.py
567 $ $PYTHON simpleclone.py
568 updating to branch default
568 updating to branch default
569 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
569 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
570
570
571 $ rm -r ua
571 $ rm -r ua
572
572
573 $ cat <<EOF > branchclone.py
573 $ cat <<EOF > branchclone.py
574 > from mercurial import ui, hg, extensions
574 > from mercurial import ui, hg, extensions
575 > myui = ui.ui.load()
575 > myui = ui.ui.load()
576 > extensions.loadall(myui)
576 > extensions.loadall(myui)
577 > repo = hg.repository(myui, b'a')
577 > repo = hg.repository(myui, b'a')
578 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable",])
578 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable",])
579 > EOF
579 > EOF
580
580
581 $ $PYTHON branchclone.py
581 $ $PYTHON branchclone.py
582 adding changesets
582 adding changesets
583 adding manifests
583 adding manifests
584 adding file changes
584 adding file changes
585 added 14 changesets with 14 changes to 3 files
585 added 14 changesets with 14 changes to 3 files
586 new changesets acb14030fe0a:0aae7cf88f0d
586 new changesets acb14030fe0a:0aae7cf88f0d
587 updating to branch stable
587 updating to branch stable
588 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
588 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
589 $ rm -r ua
589 $ rm -r ua
590
590
591
591
592 Test clone with special '@' bookmark:
592 Test clone with special '@' bookmark:
593 $ cd a
593 $ cd a
594 $ hg bookmark -r a7949464abda @ # branch point of stable from default
594 $ hg bookmark -r a7949464abda @ # branch point of stable from default
595 $ hg clone . ../i
595 $ hg clone . ../i
596 updating to bookmark @
596 updating to bookmark @
597 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
597 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
598 $ hg id -i ../i
598 $ hg id -i ../i
599 a7949464abda
599 a7949464abda
600 $ rm -r ../i
600 $ rm -r ../i
601
601
602 $ hg bookmark -f -r stable @
602 $ hg bookmark -f -r stable @
603 $ hg bookmarks
603 $ hg bookmarks
604 @ 15:0aae7cf88f0d
604 @ 15:0aae7cf88f0d
605 $ hg clone . ../i
605 $ hg clone . ../i
606 updating to bookmark @ on branch stable
606 updating to bookmark @ on branch stable
607 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
607 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
608 $ hg id -i ../i
608 $ hg id -i ../i
609 0aae7cf88f0d
609 0aae7cf88f0d
610 $ cd "$TESTTMP"
610 $ cd "$TESTTMP"
611
611
612
612
613 Testing failures:
613 Testing failures:
614
614
615 $ mkdir fail
615 $ mkdir fail
616 $ cd fail
616 $ cd fail
617
617
618 No local source
618 No local source
619
619
620 $ hg clone a b
620 $ hg clone a b
621 abort: repository a not found!
621 abort: repository a not found!
622 [255]
622 [255]
623
623
624 No remote source
624 No remote source
625
625
626 #if windows
626 #if windows
627 $ hg clone http://$LOCALIP:3121/a b
627 $ hg clone http://$LOCALIP:3121/a b
628 abort: error: * (glob)
628 abort: error: * (glob)
629 [255]
629 [255]
630 #else
630 #else
631 $ hg clone http://$LOCALIP:3121/a b
631 $ hg clone http://$LOCALIP:3121/a b
632 abort: error: *refused* (glob)
632 abort: error: *refused* (glob)
633 [255]
633 [255]
634 #endif
634 #endif
635 $ rm -rf b # work around bug with http clone
635 $ rm -rf b # work around bug with http clone
636
636
637
637
638 #if unix-permissions no-root
638 #if unix-permissions no-root
639
639
640 Inaccessible source
640 Inaccessible source
641
641
642 $ mkdir a
642 $ mkdir a
643 $ chmod 000 a
643 $ chmod 000 a
644 $ hg clone a b
644 $ hg clone a b
645 abort: repository a not found!
645 abort: Permission denied: '$TESTTMP/fail/a/.hg'
646 [255]
646 [255]
647
647
648 Inaccessible destination
648 Inaccessible destination
649
649
650 $ hg init b
650 $ hg init b
651 $ cd b
651 $ cd b
652 $ hg clone . ../a
652 $ hg clone . ../a
653 abort: Permission denied: '../a'
653 abort: Permission denied: '../a'
654 [255]
654 [255]
655 $ cd ..
655 $ cd ..
656 $ chmod 700 a
656 $ chmod 700 a
657 $ rm -r a b
657 $ rm -r a b
658
658
659 #endif
659 #endif
660
660
661
661
662 #if fifo
662 #if fifo
663
663
664 Source of wrong type
664 Source of wrong type
665
665
666 $ mkfifo a
666 $ mkfifo a
667 $ hg clone a b
667 $ hg clone a b
668 abort: repository a not found!
668 abort: $ENOTDIR$: '$TESTTMP/fail/a/.hg'
669 [255]
669 [255]
670 $ rm a
670 $ rm a
671
671
672 #endif
672 #endif
673
673
674 Default destination, same directory
674 Default destination, same directory
675
675
676 $ hg init q
676 $ hg init q
677 $ hg clone q
677 $ hg clone q
678 destination directory: q
678 destination directory: q
679 abort: destination 'q' is not empty
679 abort: destination 'q' is not empty
680 [255]
680 [255]
681
681
682 destination directory not empty
682 destination directory not empty
683
683
684 $ mkdir a
684 $ mkdir a
685 $ echo stuff > a/a
685 $ echo stuff > a/a
686 $ hg clone q a
686 $ hg clone q a
687 abort: destination 'a' is not empty
687 abort: destination 'a' is not empty
688 [255]
688 [255]
689
689
690
690
691 #if unix-permissions no-root
691 #if unix-permissions no-root
692
692
693 leave existing directory in place after clone failure
693 leave existing directory in place after clone failure
694
694
695 $ hg init c
695 $ hg init c
696 $ cd c
696 $ cd c
697 $ echo c > c
697 $ echo c > c
698 $ hg commit -A -m test
698 $ hg commit -A -m test
699 adding c
699 adding c
700 $ chmod -rx .hg/store/data
700 $ chmod -rx .hg/store/data
701 $ cd ..
701 $ cd ..
702 $ mkdir d
702 $ mkdir d
703 $ hg clone c d 2> err
703 $ hg clone c d 2> err
704 [255]
704 [255]
705 $ test -d d
705 $ test -d d
706 $ test -d d/.hg
706 $ test -d d/.hg
707 [1]
707 [1]
708
708
709 re-enable perm to allow deletion
709 re-enable perm to allow deletion
710
710
711 $ chmod +rx c/.hg/store/data
711 $ chmod +rx c/.hg/store/data
712
712
713 #endif
713 #endif
714
714
715 $ cd ..
715 $ cd ..
716
716
717 Test clone from the repository in (emulated) revlog format 0 (issue4203):
717 Test clone from the repository in (emulated) revlog format 0 (issue4203):
718
718
719 $ mkdir issue4203
719 $ mkdir issue4203
720 $ mkdir -p src/.hg
720 $ mkdir -p src/.hg
721 $ echo foo > src/foo
721 $ echo foo > src/foo
722 $ hg -R src add src/foo
722 $ hg -R src add src/foo
723 $ hg -R src commit -m '#0'
723 $ hg -R src commit -m '#0'
724 $ hg -R src log -q
724 $ hg -R src log -q
725 0:e1bab28bca43
725 0:e1bab28bca43
726 $ hg clone -U -q src dst
726 $ hg clone -U -q src dst
727 $ hg -R dst log -q
727 $ hg -R dst log -q
728 0:e1bab28bca43
728 0:e1bab28bca43
729
729
730 Create repositories to test auto sharing functionality
730 Create repositories to test auto sharing functionality
731
731
732 $ cat >> $HGRCPATH << EOF
732 $ cat >> $HGRCPATH << EOF
733 > [extensions]
733 > [extensions]
734 > share=
734 > share=
735 > EOF
735 > EOF
736
736
737 $ hg init empty
737 $ hg init empty
738 $ hg init source1a
738 $ hg init source1a
739 $ cd source1a
739 $ cd source1a
740 $ echo initial1 > foo
740 $ echo initial1 > foo
741 $ hg -q commit -A -m initial
741 $ hg -q commit -A -m initial
742 $ echo second > foo
742 $ echo second > foo
743 $ hg commit -m second
743 $ hg commit -m second
744 $ cd ..
744 $ cd ..
745
745
746 $ hg init filteredrev0
746 $ hg init filteredrev0
747 $ cd filteredrev0
747 $ cd filteredrev0
748 $ cat >> .hg/hgrc << EOF
748 $ cat >> .hg/hgrc << EOF
749 > [experimental]
749 > [experimental]
750 > evolution.createmarkers=True
750 > evolution.createmarkers=True
751 > EOF
751 > EOF
752 $ echo initial1 > foo
752 $ echo initial1 > foo
753 $ hg -q commit -A -m initial0
753 $ hg -q commit -A -m initial0
754 $ hg -q up -r null
754 $ hg -q up -r null
755 $ echo initial2 > foo
755 $ echo initial2 > foo
756 $ hg -q commit -A -m initial1
756 $ hg -q commit -A -m initial1
757 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
757 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
758 obsoleted 1 changesets
758 obsoleted 1 changesets
759 $ cd ..
759 $ cd ..
760
760
761 $ hg -q clone --pull source1a source1b
761 $ hg -q clone --pull source1a source1b
762 $ cd source1a
762 $ cd source1a
763 $ hg bookmark bookA
763 $ hg bookmark bookA
764 $ echo 1a > foo
764 $ echo 1a > foo
765 $ hg commit -m 1a
765 $ hg commit -m 1a
766 $ cd ../source1b
766 $ cd ../source1b
767 $ hg -q up -r 0
767 $ hg -q up -r 0
768 $ echo head1 > foo
768 $ echo head1 > foo
769 $ hg commit -m head1
769 $ hg commit -m head1
770 created new head
770 created new head
771 $ hg bookmark head1
771 $ hg bookmark head1
772 $ hg -q up -r 0
772 $ hg -q up -r 0
773 $ echo head2 > foo
773 $ echo head2 > foo
774 $ hg commit -m head2
774 $ hg commit -m head2
775 created new head
775 created new head
776 $ hg bookmark head2
776 $ hg bookmark head2
777 $ hg -q up -r 0
777 $ hg -q up -r 0
778 $ hg branch branch1
778 $ hg branch branch1
779 marked working directory as branch branch1
779 marked working directory as branch branch1
780 (branches are permanent and global, did you want a bookmark?)
780 (branches are permanent and global, did you want a bookmark?)
781 $ echo branch1 > foo
781 $ echo branch1 > foo
782 $ hg commit -m branch1
782 $ hg commit -m branch1
783 $ hg -q up -r 0
783 $ hg -q up -r 0
784 $ hg branch branch2
784 $ hg branch branch2
785 marked working directory as branch branch2
785 marked working directory as branch branch2
786 $ echo branch2 > foo
786 $ echo branch2 > foo
787 $ hg commit -m branch2
787 $ hg commit -m branch2
788 $ cd ..
788 $ cd ..
789 $ hg init source2
789 $ hg init source2
790 $ cd source2
790 $ cd source2
791 $ echo initial2 > foo
791 $ echo initial2 > foo
792 $ hg -q commit -A -m initial2
792 $ hg -q commit -A -m initial2
793 $ echo second > foo
793 $ echo second > foo
794 $ hg commit -m second
794 $ hg commit -m second
795 $ cd ..
795 $ cd ..
796
796
797 Clone with auto share from an empty repo should not result in share
797 Clone with auto share from an empty repo should not result in share
798
798
799 $ mkdir share
799 $ mkdir share
800 $ hg --config share.pool=share clone empty share-empty
800 $ hg --config share.pool=share clone empty share-empty
801 (not using pooled storage: remote appears to be empty)
801 (not using pooled storage: remote appears to be empty)
802 updating to branch default
802 updating to branch default
803 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
803 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
804 $ ls share
804 $ ls share
805 $ test -d share-empty/.hg/store
805 $ test -d share-empty/.hg/store
806 $ test -f share-empty/.hg/sharedpath
806 $ test -f share-empty/.hg/sharedpath
807 [1]
807 [1]
808
808
809 Clone with auto share from a repo with filtered revision 0 should not result in share
809 Clone with auto share from a repo with filtered revision 0 should not result in share
810
810
811 $ hg --config share.pool=share clone filteredrev0 share-filtered
811 $ hg --config share.pool=share clone filteredrev0 share-filtered
812 (not using pooled storage: unable to resolve identity of remote)
812 (not using pooled storage: unable to resolve identity of remote)
813 requesting all changes
813 requesting all changes
814 adding changesets
814 adding changesets
815 adding manifests
815 adding manifests
816 adding file changes
816 adding file changes
817 added 1 changesets with 1 changes to 1 files
817 added 1 changesets with 1 changes to 1 files
818 new changesets e082c1832e09
818 new changesets e082c1832e09
819 updating to branch default
819 updating to branch default
820 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
820 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
821
821
822 Clone from repo with content should result in shared store being created
822 Clone from repo with content should result in shared store being created
823
823
824 $ hg --config share.pool=share clone source1a share-dest1a
824 $ hg --config share.pool=share clone source1a share-dest1a
825 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
825 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
826 requesting all changes
826 requesting all changes
827 adding changesets
827 adding changesets
828 adding manifests
828 adding manifests
829 adding file changes
829 adding file changes
830 added 3 changesets with 3 changes to 1 files
830 added 3 changesets with 3 changes to 1 files
831 new changesets b5f04eac9d8f:e5bfe23c0b47
831 new changesets b5f04eac9d8f:e5bfe23c0b47
832 searching for changes
832 searching for changes
833 no changes found
833 no changes found
834 adding remote bookmark bookA
834 adding remote bookmark bookA
835 updating working directory
835 updating working directory
836 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
836 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
837
837
838 The shared repo should have been created
838 The shared repo should have been created
839
839
840 $ ls share
840 $ ls share
841 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
841 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
842
842
843 The destination should point to it
843 The destination should point to it
844
844
845 $ cat share-dest1a/.hg/sharedpath; echo
845 $ cat share-dest1a/.hg/sharedpath; echo
846 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
846 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
847
847
848 The destination should have bookmarks
848 The destination should have bookmarks
849
849
850 $ hg -R share-dest1a bookmarks
850 $ hg -R share-dest1a bookmarks
851 bookA 2:e5bfe23c0b47
851 bookA 2:e5bfe23c0b47
852
852
853 The default path should be the remote, not the share
853 The default path should be the remote, not the share
854
854
855 $ hg -R share-dest1a config paths.default
855 $ hg -R share-dest1a config paths.default
856 $TESTTMP/source1a
856 $TESTTMP/source1a
857
857
858 Clone with existing share dir should result in pull + share
858 Clone with existing share dir should result in pull + share
859
859
860 $ hg --config share.pool=share clone source1b share-dest1b
860 $ hg --config share.pool=share clone source1b share-dest1b
861 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
861 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
862 searching for changes
862 searching for changes
863 adding changesets
863 adding changesets
864 adding manifests
864 adding manifests
865 adding file changes
865 adding file changes
866 added 4 changesets with 4 changes to 1 files (+4 heads)
866 added 4 changesets with 4 changes to 1 files (+4 heads)
867 adding remote bookmark head1
867 adding remote bookmark head1
868 adding remote bookmark head2
868 adding remote bookmark head2
869 new changesets 4a8dc1ab4c13:6bacf4683960
869 new changesets 4a8dc1ab4c13:6bacf4683960
870 updating working directory
870 updating working directory
871 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
871 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
872
872
873 $ ls share
873 $ ls share
874 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
874 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
875
875
876 $ cat share-dest1b/.hg/sharedpath; echo
876 $ cat share-dest1b/.hg/sharedpath; echo
877 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
877 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
878
878
879 We only get bookmarks from the remote, not everything in the share
879 We only get bookmarks from the remote, not everything in the share
880
880
881 $ hg -R share-dest1b bookmarks
881 $ hg -R share-dest1b bookmarks
882 head1 3:4a8dc1ab4c13
882 head1 3:4a8dc1ab4c13
883 head2 4:99f71071f117
883 head2 4:99f71071f117
884
884
885 Default path should be source, not share.
885 Default path should be source, not share.
886
886
887 $ hg -R share-dest1b config paths.default
887 $ hg -R share-dest1b config paths.default
888 $TESTTMP/source1b
888 $TESTTMP/source1b
889
889
890 Checked out revision should be head of default branch
890 Checked out revision should be head of default branch
891
891
892 $ hg -R share-dest1b log -r .
892 $ hg -R share-dest1b log -r .
893 changeset: 4:99f71071f117
893 changeset: 4:99f71071f117
894 bookmark: head2
894 bookmark: head2
895 parent: 0:b5f04eac9d8f
895 parent: 0:b5f04eac9d8f
896 user: test
896 user: test
897 date: Thu Jan 01 00:00:00 1970 +0000
897 date: Thu Jan 01 00:00:00 1970 +0000
898 summary: head2
898 summary: head2
899
899
900
900
901 Clone from unrelated repo should result in new share
901 Clone from unrelated repo should result in new share
902
902
903 $ hg --config share.pool=share clone source2 share-dest2
903 $ hg --config share.pool=share clone source2 share-dest2
904 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
904 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
905 requesting all changes
905 requesting all changes
906 adding changesets
906 adding changesets
907 adding manifests
907 adding manifests
908 adding file changes
908 adding file changes
909 added 2 changesets with 2 changes to 1 files
909 added 2 changesets with 2 changes to 1 files
910 new changesets 22aeff664783:63cf6c3dba4a
910 new changesets 22aeff664783:63cf6c3dba4a
911 searching for changes
911 searching for changes
912 no changes found
912 no changes found
913 updating working directory
913 updating working directory
914 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
914 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
915
915
916 $ ls share
916 $ ls share
917 22aeff664783fd44c6d9b435618173c118c3448e
917 22aeff664783fd44c6d9b435618173c118c3448e
918 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
918 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
919
919
920 remote naming mode works as advertised
920 remote naming mode works as advertised
921
921
922 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
922 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
923 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
923 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
924 requesting all changes
924 requesting all changes
925 adding changesets
925 adding changesets
926 adding manifests
926 adding manifests
927 adding file changes
927 adding file changes
928 added 3 changesets with 3 changes to 1 files
928 added 3 changesets with 3 changes to 1 files
929 new changesets b5f04eac9d8f:e5bfe23c0b47
929 new changesets b5f04eac9d8f:e5bfe23c0b47
930 searching for changes
930 searching for changes
931 no changes found
931 no changes found
932 adding remote bookmark bookA
932 adding remote bookmark bookA
933 updating working directory
933 updating working directory
934 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
934 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
935
935
936 $ ls shareremote
936 $ ls shareremote
937 195bb1fcdb595c14a6c13e0269129ed78f6debde
937 195bb1fcdb595c14a6c13e0269129ed78f6debde
938
938
939 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
939 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
940 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
940 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
941 requesting all changes
941 requesting all changes
942 adding changesets
942 adding changesets
943 adding manifests
943 adding manifests
944 adding file changes
944 adding file changes
945 added 6 changesets with 6 changes to 1 files (+4 heads)
945 added 6 changesets with 6 changes to 1 files (+4 heads)
946 new changesets b5f04eac9d8f:6bacf4683960
946 new changesets b5f04eac9d8f:6bacf4683960
947 searching for changes
947 searching for changes
948 no changes found
948 no changes found
949 adding remote bookmark head1
949 adding remote bookmark head1
950 adding remote bookmark head2
950 adding remote bookmark head2
951 updating working directory
951 updating working directory
952 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
952 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
953
953
954 $ ls shareremote
954 $ ls shareremote
955 195bb1fcdb595c14a6c13e0269129ed78f6debde
955 195bb1fcdb595c14a6c13e0269129ed78f6debde
956 c0d4f83847ca2a873741feb7048a45085fd47c46
956 c0d4f83847ca2a873741feb7048a45085fd47c46
957
957
958 request to clone a single revision is respected in sharing mode
958 request to clone a single revision is respected in sharing mode
959
959
960 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
960 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
961 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
961 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
962 adding changesets
962 adding changesets
963 adding manifests
963 adding manifests
964 adding file changes
964 adding file changes
965 added 2 changesets with 2 changes to 1 files
965 added 2 changesets with 2 changes to 1 files
966 new changesets b5f04eac9d8f:4a8dc1ab4c13
966 new changesets b5f04eac9d8f:4a8dc1ab4c13
967 no changes found
967 no changes found
968 adding remote bookmark head1
968 adding remote bookmark head1
969 updating working directory
969 updating working directory
970 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
970 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
971
971
972 $ hg -R share-1arev log -G
972 $ hg -R share-1arev log -G
973 @ changeset: 1:4a8dc1ab4c13
973 @ changeset: 1:4a8dc1ab4c13
974 | bookmark: head1
974 | bookmark: head1
975 | tag: tip
975 | tag: tip
976 | user: test
976 | user: test
977 | date: Thu Jan 01 00:00:00 1970 +0000
977 | date: Thu Jan 01 00:00:00 1970 +0000
978 | summary: head1
978 | summary: head1
979 |
979 |
980 o changeset: 0:b5f04eac9d8f
980 o changeset: 0:b5f04eac9d8f
981 user: test
981 user: test
982 date: Thu Jan 01 00:00:00 1970 +0000
982 date: Thu Jan 01 00:00:00 1970 +0000
983 summary: initial
983 summary: initial
984
984
985
985
986 making another clone should only pull down requested rev
986 making another clone should only pull down requested rev
987
987
988 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
988 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
989 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
989 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
990 searching for changes
990 searching for changes
991 adding changesets
991 adding changesets
992 adding manifests
992 adding manifests
993 adding file changes
993 adding file changes
994 added 1 changesets with 1 changes to 1 files (+1 heads)
994 added 1 changesets with 1 changes to 1 files (+1 heads)
995 adding remote bookmark head1
995 adding remote bookmark head1
996 adding remote bookmark head2
996 adding remote bookmark head2
997 new changesets 99f71071f117
997 new changesets 99f71071f117
998 updating working directory
998 updating working directory
999 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
999 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1000
1000
1001 $ hg -R share-1brev log -G
1001 $ hg -R share-1brev log -G
1002 @ changeset: 2:99f71071f117
1002 @ changeset: 2:99f71071f117
1003 | bookmark: head2
1003 | bookmark: head2
1004 | tag: tip
1004 | tag: tip
1005 | parent: 0:b5f04eac9d8f
1005 | parent: 0:b5f04eac9d8f
1006 | user: test
1006 | user: test
1007 | date: Thu Jan 01 00:00:00 1970 +0000
1007 | date: Thu Jan 01 00:00:00 1970 +0000
1008 | summary: head2
1008 | summary: head2
1009 |
1009 |
1010 | o changeset: 1:4a8dc1ab4c13
1010 | o changeset: 1:4a8dc1ab4c13
1011 |/ bookmark: head1
1011 |/ bookmark: head1
1012 | user: test
1012 | user: test
1013 | date: Thu Jan 01 00:00:00 1970 +0000
1013 | date: Thu Jan 01 00:00:00 1970 +0000
1014 | summary: head1
1014 | summary: head1
1015 |
1015 |
1016 o changeset: 0:b5f04eac9d8f
1016 o changeset: 0:b5f04eac9d8f
1017 user: test
1017 user: test
1018 date: Thu Jan 01 00:00:00 1970 +0000
1018 date: Thu Jan 01 00:00:00 1970 +0000
1019 summary: initial
1019 summary: initial
1020
1020
1021
1021
1022 Request to clone a single branch is respected in sharing mode
1022 Request to clone a single branch is respected in sharing mode
1023
1023
1024 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1024 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1025 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1025 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1026 adding changesets
1026 adding changesets
1027 adding manifests
1027 adding manifests
1028 adding file changes
1028 adding file changes
1029 added 2 changesets with 2 changes to 1 files
1029 added 2 changesets with 2 changes to 1 files
1030 new changesets b5f04eac9d8f:5f92a6c1a1b1
1030 new changesets b5f04eac9d8f:5f92a6c1a1b1
1031 no changes found
1031 no changes found
1032 updating working directory
1032 updating working directory
1033 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1033 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1034
1034
1035 $ hg -R share-1bbranch1 log -G
1035 $ hg -R share-1bbranch1 log -G
1036 o changeset: 1:5f92a6c1a1b1
1036 o changeset: 1:5f92a6c1a1b1
1037 | branch: branch1
1037 | branch: branch1
1038 | tag: tip
1038 | tag: tip
1039 | user: test
1039 | user: test
1040 | date: Thu Jan 01 00:00:00 1970 +0000
1040 | date: Thu Jan 01 00:00:00 1970 +0000
1041 | summary: branch1
1041 | summary: branch1
1042 |
1042 |
1043 @ changeset: 0:b5f04eac9d8f
1043 @ changeset: 0:b5f04eac9d8f
1044 user: test
1044 user: test
1045 date: Thu Jan 01 00:00:00 1970 +0000
1045 date: Thu Jan 01 00:00:00 1970 +0000
1046 summary: initial
1046 summary: initial
1047
1047
1048
1048
1049 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1049 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1050 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1050 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1051 searching for changes
1051 searching for changes
1052 adding changesets
1052 adding changesets
1053 adding manifests
1053 adding manifests
1054 adding file changes
1054 adding file changes
1055 added 1 changesets with 1 changes to 1 files (+1 heads)
1055 added 1 changesets with 1 changes to 1 files (+1 heads)
1056 new changesets 6bacf4683960
1056 new changesets 6bacf4683960
1057 updating working directory
1057 updating working directory
1058 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1058 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1059
1059
1060 $ hg -R share-1bbranch2 log -G
1060 $ hg -R share-1bbranch2 log -G
1061 o changeset: 2:6bacf4683960
1061 o changeset: 2:6bacf4683960
1062 | branch: branch2
1062 | branch: branch2
1063 | tag: tip
1063 | tag: tip
1064 | parent: 0:b5f04eac9d8f
1064 | parent: 0:b5f04eac9d8f
1065 | user: test
1065 | user: test
1066 | date: Thu Jan 01 00:00:00 1970 +0000
1066 | date: Thu Jan 01 00:00:00 1970 +0000
1067 | summary: branch2
1067 | summary: branch2
1068 |
1068 |
1069 | o changeset: 1:5f92a6c1a1b1
1069 | o changeset: 1:5f92a6c1a1b1
1070 |/ branch: branch1
1070 |/ branch: branch1
1071 | user: test
1071 | user: test
1072 | date: Thu Jan 01 00:00:00 1970 +0000
1072 | date: Thu Jan 01 00:00:00 1970 +0000
1073 | summary: branch1
1073 | summary: branch1
1074 |
1074 |
1075 @ changeset: 0:b5f04eac9d8f
1075 @ changeset: 0:b5f04eac9d8f
1076 user: test
1076 user: test
1077 date: Thu Jan 01 00:00:00 1970 +0000
1077 date: Thu Jan 01 00:00:00 1970 +0000
1078 summary: initial
1078 summary: initial
1079
1079
1080
1080
1081 -U is respected in share clone mode
1081 -U is respected in share clone mode
1082
1082
1083 $ hg --config share.pool=share clone -U source1a share-1anowc
1083 $ hg --config share.pool=share clone -U source1a share-1anowc
1084 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1084 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1085 searching for changes
1085 searching for changes
1086 no changes found
1086 no changes found
1087 adding remote bookmark bookA
1087 adding remote bookmark bookA
1088
1088
1089 $ ls share-1anowc
1089 $ ls share-1anowc
1090
1090
1091 Test that auto sharing doesn't cause failure of "hg clone local remote"
1091 Test that auto sharing doesn't cause failure of "hg clone local remote"
1092
1092
1093 $ cd $TESTTMP
1093 $ cd $TESTTMP
1094 $ hg -R a id -r 0
1094 $ hg -R a id -r 0
1095 acb14030fe0a
1095 acb14030fe0a
1096 $ hg id -R remote -r 0
1096 $ hg id -R remote -r 0
1097 abort: repository remote not found!
1097 abort: repository remote not found!
1098 [255]
1098 [255]
1099 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1099 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1100 $ hg -R remote id -r 0
1100 $ hg -R remote id -r 0
1101 acb14030fe0a
1101 acb14030fe0a
1102
1102
1103 Cloning into pooled storage doesn't race (issue5104)
1103 Cloning into pooled storage doesn't race (issue5104)
1104
1104
1105 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1105 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1106 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1106 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1107 $ wait
1107 $ wait
1108
1108
1109 $ hg -R share-destrace1 log -r tip
1109 $ hg -R share-destrace1 log -r tip
1110 changeset: 2:e5bfe23c0b47
1110 changeset: 2:e5bfe23c0b47
1111 bookmark: bookA
1111 bookmark: bookA
1112 tag: tip
1112 tag: tip
1113 user: test
1113 user: test
1114 date: Thu Jan 01 00:00:00 1970 +0000
1114 date: Thu Jan 01 00:00:00 1970 +0000
1115 summary: 1a
1115 summary: 1a
1116
1116
1117
1117
1118 $ hg -R share-destrace2 log -r tip
1118 $ hg -R share-destrace2 log -r tip
1119 changeset: 2:e5bfe23c0b47
1119 changeset: 2:e5bfe23c0b47
1120 bookmark: bookA
1120 bookmark: bookA
1121 tag: tip
1121 tag: tip
1122 user: test
1122 user: test
1123 date: Thu Jan 01 00:00:00 1970 +0000
1123 date: Thu Jan 01 00:00:00 1970 +0000
1124 summary: 1a
1124 summary: 1a
1125
1125
1126 One repo should be new, the other should be shared from the pool. We
1126 One repo should be new, the other should be shared from the pool. We
1127 don't care which is which, so we just make sure we always print the
1127 don't care which is which, so we just make sure we always print the
1128 one containing "new pooled" first, then one one containing "existing
1128 one containing "new pooled" first, then one one containing "existing
1129 pooled".
1129 pooled".
1130
1130
1131 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1131 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1132 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1132 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1133 requesting all changes
1133 requesting all changes
1134 adding changesets
1134 adding changesets
1135 adding manifests
1135 adding manifests
1136 adding file changes
1136 adding file changes
1137 added 3 changesets with 3 changes to 1 files
1137 added 3 changesets with 3 changes to 1 files
1138 new changesets b5f04eac9d8f:e5bfe23c0b47
1138 new changesets b5f04eac9d8f:e5bfe23c0b47
1139 searching for changes
1139 searching for changes
1140 no changes found
1140 no changes found
1141 adding remote bookmark bookA
1141 adding remote bookmark bookA
1142 updating working directory
1142 updating working directory
1143 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1143 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1144
1144
1145 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1145 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1146 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1146 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1147 searching for changes
1147 searching for changes
1148 no changes found
1148 no changes found
1149 adding remote bookmark bookA
1149 adding remote bookmark bookA
1150 updating working directory
1150 updating working directory
1151 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1151 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1152
1152
1153 SEC: check for unsafe ssh url
1153 SEC: check for unsafe ssh url
1154
1154
1155 $ cat >> $HGRCPATH << EOF
1155 $ cat >> $HGRCPATH << EOF
1156 > [ui]
1156 > [ui]
1157 > ssh = sh -c "read l; read l; read l"
1157 > ssh = sh -c "read l; read l; read l"
1158 > EOF
1158 > EOF
1159
1159
1160 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1160 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1161 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1161 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1162 [255]
1162 [255]
1163 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1163 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1164 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1164 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1165 [255]
1165 [255]
1166 $ hg clone 'ssh://fakehost|touch%20owned/path'
1166 $ hg clone 'ssh://fakehost|touch%20owned/path'
1167 abort: no suitable response from remote hg!
1167 abort: no suitable response from remote hg!
1168 [255]
1168 [255]
1169 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1169 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1170 abort: no suitable response from remote hg!
1170 abort: no suitable response from remote hg!
1171 [255]
1171 [255]
1172
1172
1173 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1173 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1174 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1174 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1175 [255]
1175 [255]
1176
1176
1177 #if windows
1177 #if windows
1178 $ hg clone "ssh://%26touch%20owned%20/" --debug
1178 $ hg clone "ssh://%26touch%20owned%20/" --debug
1179 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1179 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1180 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1180 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1181 sending hello command
1181 sending hello command
1182 sending between command
1182 sending between command
1183 abort: no suitable response from remote hg!
1183 abort: no suitable response from remote hg!
1184 [255]
1184 [255]
1185 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1185 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1186 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1186 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1187 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1187 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1188 sending hello command
1188 sending hello command
1189 sending between command
1189 sending between command
1190 abort: no suitable response from remote hg!
1190 abort: no suitable response from remote hg!
1191 [255]
1191 [255]
1192 #else
1192 #else
1193 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1193 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1194 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1194 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1195 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1195 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1196 sending hello command
1196 sending hello command
1197 sending between command
1197 sending between command
1198 abort: no suitable response from remote hg!
1198 abort: no suitable response from remote hg!
1199 [255]
1199 [255]
1200 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1200 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1201 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1201 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1202 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1202 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1203 sending hello command
1203 sending hello command
1204 sending between command
1204 sending between command
1205 abort: no suitable response from remote hg!
1205 abort: no suitable response from remote hg!
1206 [255]
1206 [255]
1207 #endif
1207 #endif
1208
1208
1209 $ hg clone "ssh://v-alid.example.com/" --debug
1209 $ hg clone "ssh://v-alid.example.com/" --debug
1210 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1210 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1211 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1211 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1212 sending hello command
1212 sending hello command
1213 sending between command
1213 sending between command
1214 abort: no suitable response from remote hg!
1214 abort: no suitable response from remote hg!
1215 [255]
1215 [255]
1216
1216
1217 We should not have created a file named owned - if it exists, the
1217 We should not have created a file named owned - if it exists, the
1218 attack succeeded.
1218 attack succeeded.
1219 $ if test -f owned; then echo 'you got owned'; fi
1219 $ if test -f owned; then echo 'you got owned'; fi
1220
1220
1221 Cloning without fsmonitor enabled does not print a warning for small repos
1221 Cloning without fsmonitor enabled does not print a warning for small repos
1222
1222
1223 $ hg clone a fsmonitor-default
1223 $ hg clone a fsmonitor-default
1224 updating to bookmark @ on branch stable
1224 updating to bookmark @ on branch stable
1225 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1225 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1226
1226
1227 Lower the warning threshold to simulate a large repo
1227 Lower the warning threshold to simulate a large repo
1228
1228
1229 $ cat >> $HGRCPATH << EOF
1229 $ cat >> $HGRCPATH << EOF
1230 > [fsmonitor]
1230 > [fsmonitor]
1231 > warn_update_file_count = 2
1231 > warn_update_file_count = 2
1232 > EOF
1232 > EOF
1233
1233
1234 We should see a warning about no fsmonitor on supported platforms
1234 We should see a warning about no fsmonitor on supported platforms
1235
1235
1236 #if linuxormacos no-fsmonitor
1236 #if linuxormacos no-fsmonitor
1237 $ hg clone a nofsmonitor
1237 $ hg clone a nofsmonitor
1238 updating to bookmark @ on branch stable
1238 updating to bookmark @ on branch stable
1239 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1239 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1240 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1240 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1241 #else
1241 #else
1242 $ hg clone a nofsmonitor
1242 $ hg clone a nofsmonitor
1243 updating to bookmark @ on branch stable
1243 updating to bookmark @ on branch stable
1244 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1244 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1245 #endif
1245 #endif
1246
1246
1247 We should not see warning about fsmonitor when it is enabled
1247 We should not see warning about fsmonitor when it is enabled
1248
1248
1249 #if fsmonitor
1249 #if fsmonitor
1250 $ hg clone a fsmonitor-enabled
1250 $ hg clone a fsmonitor-enabled
1251 updating to bookmark @ on branch stable
1251 updating to bookmark @ on branch stable
1252 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1252 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1253 #endif
1253 #endif
1254
1254
1255 We can disable the fsmonitor warning
1255 We can disable the fsmonitor warning
1256
1256
1257 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1257 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1258 updating to bookmark @ on branch stable
1258 updating to bookmark @ on branch stable
1259 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1259 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1260
1260
1261 Loaded fsmonitor but disabled in config should still print warning
1261 Loaded fsmonitor but disabled in config should still print warning
1262
1262
1263 #if linuxormacos fsmonitor
1263 #if linuxormacos fsmonitor
1264 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1264 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1265 updating to bookmark @ on branch stable
1265 updating to bookmark @ on branch stable
1266 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1266 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1267 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1267 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1268 #endif
1268 #endif
1269
1269
1270 Warning not printed if working directory isn't empty
1270 Warning not printed if working directory isn't empty
1271
1271
1272 $ hg -q clone a fsmonitor-update
1272 $ hg -q clone a fsmonitor-update
1273 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1273 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1274 $ cd fsmonitor-update
1274 $ cd fsmonitor-update
1275 $ hg up acb14030fe0a
1275 $ hg up acb14030fe0a
1276 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1276 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1277 (leaving bookmark @)
1277 (leaving bookmark @)
1278 $ hg up cf0fe1914066
1278 $ hg up cf0fe1914066
1279 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1279 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1280
1280
1281 `hg update` from null revision also prints
1281 `hg update` from null revision also prints
1282
1282
1283 $ hg up null
1283 $ hg up null
1284 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1284 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1285
1285
1286 #if linuxormacos no-fsmonitor
1286 #if linuxormacos no-fsmonitor
1287 $ hg up cf0fe1914066
1287 $ hg up cf0fe1914066
1288 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1288 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1289 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1289 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1290 #else
1290 #else
1291 $ hg up cf0fe1914066
1291 $ hg up cf0fe1914066
1292 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1292 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1293 #endif
1293 #endif
1294
1294
1295 $ cd ..
1295 $ cd ..
1296
1296
General Comments 0
You need to be logged in to leave comments. Login now