bookmarks: actual fix for race condition deleting bookmark...
marmoute - r42903:e0cf09bc stable
@@ -1,3247 +1,3296 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""
    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == 'plain':
            return obj.vfs.join(fname)
        else:
            if location != '':
                raise error.ProgrammingError('unexpected location: %s' %
                                             location)
            return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

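# Example usage (an illustrative sketch; assumes ``repo`` is an already-open
# local repository object, whose changelog lives under the 'changelog'
# filecache key as on localrepository):
#
#   cached, present = isfilecached(repo, 'changelog')
#   if present:
#       cl = cached           # reuse the in-memory changelog
#   else:
#       cl = repo.changelog   # triggers the filecache property
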
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

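# Example usage (an illustrative sketch; mirrors how localrepository applies
# this decorator to methods that must never operate on a filtered view):
#
#   class somerepo(localrepository):
#       @unfilteredmethod
#       def destroying(self):
#           # ``self`` is always the unfiltered repo here, so revisions
#           # hidden by the current filter remain reachable.
#           ...
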
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

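# Example usage (an illustrative sketch; assumes ``peer`` is a ``localpeer``,
# e.g. obtained from ``repo.peer()``):
#
#   with peer.commandexecutor() as executor:
#       f = executor.callcommand('lookup', {'key': 'tip'})
#   # The local executor resolves futures immediately, so this won't block.
#   node = f.result()
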
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

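# Example registration (an illustrative sketch from a hypothetical extension;
# b'exp-myfeature' is a made-up requirement string):
#
#   def featuresetup(ui, supported):
#       # advertise that this extension can open repos with the requirement
#       supported.add(b'exp-myfeature')
#
#   localrepo.featuresetupfuncs.add(featuresetup)
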
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')


    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

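# Example usage (an illustrative sketch; callers normally go through
# ``hg.repository()``, which ends up invoking this function for local paths):
#
#   from mercurial import ui as uimod
#
#   repo = makelocalrepository(uimod.ui.load(), b'/path/to/repo')
#   repo.ui.write(b'%d revisions\n' % len(repo))
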
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

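# Example monkeypatch (an illustrative sketch from a hypothetical extension;
# b'myextra.rc' is a made-up config file name):
#
#   def wrappedloadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#       loaded = orig(ui, wdirvfs, hgvfs, requirements)
#       try:
#           # pull in an extra, extension-specific per-repo config file
#           ui.readconfig(hgvfs.join(b'myextra.rc'), root=wdirvfs.base)
#           return True
#       except IOError:
#           return loaded
#
#   extensions.wrapfunction(localrepo, 'loadhgrc', wrappedloadhgrc)
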
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == 'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

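# Example usage (an illustrative sketch mirroring the call in
# makelocalrepository() above; a repository created by modern Mercurial
# typically has 'store', 'fncache' and 'dotencode' requirements, so this
# returns a fncachestore with dotencode enabled):
#
#   store = makestore({b'store', b'fncache', b'dotencode'},
#                     storebasepath,
#                     lambda base: vfsmod.vfs(base, cacheaudited=True))
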
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

752 def resolverevlogstorevfsoptions(ui, requirements, features):
752 def resolverevlogstorevfsoptions(ui, requirements, features):
753 """Resolve opener options specific to revlogs."""
753 """Resolve opener options specific to revlogs."""
754
754
755 options = {}
755 options = {}
756 options[b'flagprocessors'] = {}
756 options[b'flagprocessors'] = {}
757
757
758 if b'revlogv1' in requirements:
758 if b'revlogv1' in requirements:
759 options[b'revlogv1'] = True
759 options[b'revlogv1'] = True
760 if REVLOGV2_REQUIREMENT in requirements:
760 if REVLOGV2_REQUIREMENT in requirements:
761 options[b'revlogv2'] = True
761 options[b'revlogv2'] = True
762
762
763 if b'generaldelta' in requirements:
763 if b'generaldelta' in requirements:
764 options[b'generaldelta'] = True
764 options[b'generaldelta'] = True
765
765
766 # experimental config: format.chunkcachesize
766 # experimental config: format.chunkcachesize
767 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
767 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
768 if chunkcachesize is not None:
768 if chunkcachesize is not None:
769 options[b'chunkcachesize'] = chunkcachesize
769 options[b'chunkcachesize'] = chunkcachesize
770
770
771 deltabothparents = ui.configbool(b'storage',
771 deltabothparents = ui.configbool(b'storage',
772 b'revlog.optimize-delta-parent-choice')
772 b'revlog.optimize-delta-parent-choice')
773 options[b'deltabothparents'] = deltabothparents
773 options[b'deltabothparents'] = deltabothparents
774
774
775 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
775 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
776 lazydeltabase = False
776 lazydeltabase = False
777 if lazydelta:
777 if lazydelta:
778 lazydeltabase = ui.configbool(b'storage',
778 lazydeltabase = ui.configbool(b'storage',
779 b'revlog.reuse-external-delta-parent')
779 b'revlog.reuse-external-delta-parent')
780 if lazydeltabase is None:
780 if lazydeltabase is None:
781 lazydeltabase = not scmutil.gddeltaconfig(ui)
781 lazydeltabase = not scmutil.gddeltaconfig(ui)
782 options[b'lazydelta'] = lazydelta
782 options[b'lazydelta'] = lazydelta
783 options[b'lazydeltabase'] = lazydeltabase
783 options[b'lazydeltabase'] = lazydeltabase
784
784
785 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
785 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
786 if 0 <= chainspan:
786 if 0 <= chainspan:
787 options[b'maxdeltachainspan'] = chainspan
787 options[b'maxdeltachainspan'] = chainspan
788
788
789 mmapindexthreshold = ui.configbytes(b'experimental',
789 mmapindexthreshold = ui.configbytes(b'experimental',
790 b'mmapindexthreshold')
790 b'mmapindexthreshold')
791 if mmapindexthreshold is not None:
791 if mmapindexthreshold is not None:
792 options[b'mmapindexthreshold'] = mmapindexthreshold
792 options[b'mmapindexthreshold'] = mmapindexthreshold
793
793
794 withsparseread = ui.configbool(b'experimental', b'sparse-read')
794 withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix('revlog-compression-') or prefix('exp-compression-'):
            options[b'compengine'] = r.split('-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

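# For reference, the sparse-read and compression-level knobs resolved above
# come from the user's hgrc. A minimal, illustrative configuration (the
# values below are examples, not recommendations):
#
#   [experimental]
#   sparse-read.density-threshold = 0.50
#   sparse-read.min-gap-size = 65536
#
#   [storage]
#   revlog.zlib.level = 6
#   revlog.zstd.level = 3
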
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
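
# Because the factories above are resolved late (through the lambdas in
# REPO_INTERFACES), an extension can substitute its own storage type by
# wrapping the module-level function. A minimal sketch, assuming a
# hypothetical extension module; ``extensions.wrapfunction`` is the
# standard wrapping helper:
#
#   from mercurial import extensions, localrepo
#
#   def wrapmakefilestorage(orig, requirements, features, **kwargs):
#       cls = orig(requirements, features, **kwargs)
#       # return ``cls`` as-is, or a subclass overriding ``file()``
#       return cls
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'makefilestorage',
#                               wrapmakefilestorage)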

@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs

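    # The wards above are installed only when developer warnings are
    # enabled. An illustrative hgrc fragment turning them on (these are the
    # 'devel' options consulted in __init__ above):
    #
    #   [devel]
    #   all-warnings = yes
    #   check-locks = yes
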
    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and '%' not in name:
            name = name + '%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

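    # Illustrative sketch of view selection (not executed here):
    #
    #   served = repo.filtered('served')   # hides secret and hidden csets
    #   unfi = repo.unfiltered()           # raw view, no filtering at all
    #   assert served.unfiltered() is unfi
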
    @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'),
                         ('bookmarks', ''), ('00changelog.i', ''))
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light"; the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock-time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestats for `bookmarks` and `changelog` are captured (for book)
        # 3) We force the `changelog` filecache to be tested
        # 4) the cachestat for `changelog` is captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cachestat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

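    # Sketch of the locking pattern the scheme above relies on (illustrative
    # caller-side code on a write path, not part of this class):
    #
    #   with repo.wlock(), repo.lock():
    #       # taking the locks invalidates stale filecache entries, so the
    #       # next access re-reads bookmarks against a refreshed changelog
    #       marks = repo._bookmarks
    #       ...
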
    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if ('changelog' in vars(self) and self.currenttransaction() is None):
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore,
                                    self._storenarrowmatch)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

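    # Illustrative sketch of narrow matcher composition (not executed here;
    # ``somematch`` is a hypothetical matcher):
    #
    #   m = repo.narrowmatch()            # the repo-wide narrowspec matcher
    #   m = repo.narrowmatch(somematch)   # somematch intersected with it
    #   m = repo.narrowmatch(somematch, includeexact=True)
    #   # as above, but exact paths from somematch are kept even when they
    #   # fall outside the narrowspec
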
    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)

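    # Supported changeid forms, for reference (illustrative, not executed):
    #
    #   repo[None]      # workingctx for the working directory
    #   repo[0]         # by integer revision number
    #   repo['tip']     # symbolic names: 'tip', 'null', '.'
    #   repo[node]      # 20-byte binary node
    #   repo[hexnode]   # 40-character hex nodeid
    #   repo[0:5]       # list of changectxs, skipping filtered revisions
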
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

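    # Illustrative revset queries (sketch; revision numbers hypothetical):
    #
    #   repo.revs('draft()')                  # smartset of integer revs
    #   repo.revs('%ld and head()', [1, 2])   # %-formatted arguments
    #   for ctx in repo.set('draft()'):       # changectx generator
    #       pass
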
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

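    # Illustrative sketch (the hook name and argument are hypothetical):
    #
    #   repo.hook('myextension-postop', throw=True, node=hex(somenode))
    #
    # With throw=True a failing hook raises an abort error instead of merely
    # returning a non-zero status.
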
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
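
    # A quick reference for the tag API above (illustrative; the tag name
    # '1.0' is hypothetical):
    #
    #   repo.tags()          # {tagname: node}, including 'tip'
    #   repo.tagtype('1.0')  # 'global', 'local', or None
    #   repo.tagslist()      # [(name, node)] ordered by revision
    #   repo.nodetags(node)  # sorted tag names pointing at ``node``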

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_("unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

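    # Illustrative branch lookups (sketch; branch names hypothetical):
    #
    #   repo.branchtip('default')                   # tip-most head node
    #   repo.branchtip('gone', ignoremissing=True)  # None, no exception
    #   repo.lookupbranch('default')                # a branch name, or the
    #                                               # branch of a revsymbol
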
    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

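    # Editor's note: known() backs the wire-protocol "known" query used by
    # discovery; a hedged local sketch (binary node values are assumed):
    #
    #   repo.known([goodnode, filterednode, unknownnode])
    #   # -> [True, False, False]: only unfiltered, present nodes are known
    #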
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

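    # Editor's note: the filter patterns loaded above come from the [encode]
    # and [decode] hgrc sections; a configuration sketch mirroring the
    # example in the hgrc documentation (not necessarily a good idea,
    # just an illustration):
    #
    #   [encode]
    #   # uncompress gzip files on checkin to improve delta compression
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   # recompress gzip files when writing them to the working directory
    #   *.gz = gzip
    #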
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

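    # Editor's note: hypothetical wwrite() calls showing the flag handling
    # above (illustrative sketch only):
    #
    #   repo.wwrite('plain.txt', data, '')    # regular file
    #   repo.wwrite('run.sh', data, 'x')      # executable bit set
    #   repo.wwrite('alias', data, 'l')       # data written as symlink target
    #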
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track their movement from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
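        #
        # Editor's note (illustrative addition, not from the original docs):
        # a tag move would therefore be recorded as a pair of lines such as
        #
        #   -M <old-hex-node> v1.2
        #   +M <new-hex-node> v1.2
        #
        # with HG_TAG_MOVED set for the surrounding hook run.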
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here;
                # as we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when closing the
                # transaction if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while the
                # transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        tr.hookargs['txnname'] = desc
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

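    # Editor's note: a hedged sketch of how callers combine locking and
    # transactions (hypothetical code; both objects act as context managers):
    #
    #   with repo.wlock(), repo.lock():
    #       with repo.transaction('my-operation') as tr:
    #           ...  # mutate the store; 'undo' files are written on success
    #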
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.svfs, 'journal.narrowspec'),
                (self.vfs, 'journal.narrowspec.dirstate'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (bookmarks.bookmarksvfs(self), 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write("journal.bookmarks",
                           bookmarksvfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

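    # Editor's note: recover() and rollback() back the 'hg recover' and
    # 'hg rollback' commands; e.g. (hypothetical shell session):
    #
    #   $ hg recover    # rolls back an interrupted transaction ('journal')
    #   $ hg rollback   # undoes the last transaction using the 'undo' files
    #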
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists('undo.bookmarks'):
            bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            self.filtered('served').branchmap()
            self.filtered('served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered('served').tags()

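    # Editor's note: 'hg debugupdatecaches' warms the caches through this
    # method; a hedged API-level equivalent (assumes an existing `repo`):
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)
    #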
    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

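    # Editor's note: a hypothetical _afterlock() registration, mirroring how
    # the txnclose hook run is deferred above (illustrative only):
    #
    #   def notify():
    #       repo.ui.status('all locks released\n')
    #   repo._afterlock(notify)   # runs once the outermost lock is released
    #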
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(vfs=self.svfs,
                       lockname="lock",
                       wait=wait,
                       releasefn=None,
                       acquirefn=self.invalidate,
                       desc=_('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

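    # Editor's note: lock-ordering sketch (hypothetical caller). Per the
    # docstrings above, wlock must always be taken before lock:
    #
    #   with repo.wlock():       # working-copy lock first
    #       with repo.lock():    # then the store lock
    #           ...
    #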
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
                    includecopymeta):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if ((fparent1 != nullid and
                     manifest1.flags(fname) != fctx.flags()) or
                    (fparent2 != nullid and
                     manifest2.flags(fname) != fctx.flags())):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or cnode is None: # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
                if includecopymeta:
                    meta["copy"] = cfname
                    meta["copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

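    # Illustrative sketch (not part of the upstream source): _filecommit is
    # driven per-file from commitctx() below; the names here are hypothetical
    # stand-ins for that caller's locals.
    #
    #   changed = []
    #   node = repo._filecommit(fctx, m1, m2, linkrev, trp, changed,
    #                           includecopymeta=True)
    #   m[fctx.path()] = node
    #
    # If the file's content and parents make the existing filelog entry
    # reusable, the old node is returned and nothing new is written; otherwise
    # flog.add() stores a new revision and the file lands in `changed`.
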
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   uipathfn(subrepoutil.subrelpath(sub)))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                with self.transaction('commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            def commithook():
                # hack for commands that use a temporary commit (eg: histedit)
                # the temporary commit may have been stripped before the hook
                # is released
                if self.changelog.hasnode(ret):
                    self.hook("commit", node=hex(ret), parent1=hookp1,
                              parent2=hookp2)
            self._afterlock(commithook)
            return ret

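    # Illustrative sketch (not part of the upstream source): driving commit()
    # programmatically; the repository path is hypothetical.
    #
    #   from mercurial import hg, ui as uimod
    #   repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
    #   node = repo.commit(text=b'example commit', user=b'alice')
    #
    # The return value is the new changelog node, or None when there is
    # nothing to commit and ui.allowemptycommit is not set.
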
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        files list actually committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed files list.

        origctx is for convert to work around the problem that bug
        fixes to the files list in changesets change hashes. For
        convert to be the identity, it can pass an origctx and this
        function will use the same files list when it makes sense to
        do so.
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        writecopiesto = self.ui.config('experimental', 'copies.write-to')
        writefilecopymeta = writecopiesto != 'changeset-only'
        writechangesetcopy = (writecopiesto in
                              ('changeset-only', 'compatibility'))
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction("commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed,
                                                    writefilecopymeta)
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(_("trouble committing %s!\n") %
                                     uipathfn(f))
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") %
                                         uipathfn(f))
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:
                    @util.cachefunc
                    def mas():
                        p1n = p1.node()
                        p2n = p2.node()
                        cahs = self.changelog.commonancestorsheads(p1n, p2n)
                        if not cahs:
                            cahs = [nullrev]
                        return [self[r].manifest() for r in cahs]
                    def deletionfromparent(f):
                        # When a file is removed relative to p1 in a merge, this
                        # function determines whether the absence is due to a
                        # deletion from a parent, or whether the merge commit
                        # itself deletes the file. We decide this by doing a
                        # simplified three way merge of the manifest entry for
                        # the file. There are two ways we decide the merge
                        # itself didn't delete a file:
                        # - neither parent (nor the merge) contains the file
                        # - exactly one parent contains the file, and that
                        #   parent has the same filelog entry as the merge
                        #   ancestor (or all of them if there are two). In
                        #   other words, that parent left the file unchanged
                        #   while the other one deleted it.
                        # One way to think about this is that deleting a file is
                        # similar to emptying it, so the list of changed files
                        # should be similar either way. The computation
                        # described above is not done directly in _filecommit
                        # when creating the list of changed files, however
                        # it does something very similar by comparing filelog
                        # nodes.
                        if f in m1:
                            return (f not in m2
                                    and all(f in ma and ma.find(f) == m1.find(f)
                                            for ma in mas()))
                        elif f in m2:
                            return all(f in ma and ma.find(f) == m2.find(f)
                                       for ma in mas())
                        else:
                            return True
                    removed = [f for f in removed if not deletionfromparent(f)]

                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())

                    if writechangesetcopy:
                        filesadded = [f for f in changed
                                      if not (f in m1 or f in m2)]
                        filesremoved = removed
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == 'changeset-only':
                # If writing only to changeset extras, use None to indicate that
                # no entry should be written. If writing to both, write an empty
                # entry to prevent the reader from falling back to reading
                # filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            if origctx and origctx.manifestnode() == mn:
                files = origctx.files()

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy(),
                                   p1copies, p2copies, filesadded, filesremoved)
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in the proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changeset; if a parent has a higher phase, the resulting
                # phase will be compliant anyway.
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
        return n

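    # Illustrative note (not part of the upstream source): the
    # experimental.copies.write-to knob read by commitctx() takes three
    # values, settable in hgrc:
    #
    #   [experimental]
    #   copies.write-to = filelog-only     # copy metadata in filelogs only
    #   #copies.write-to = changeset-only  # copy metadata in changeset extras
    #   #copies.write-to = compatibility   # write to both places
    #
    # 'filelog-only' is believed to be the default in this era.
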
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

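    # Illustrative sketch (not part of the upstream source): the returned
    # object is an scmutil.status tuple with modified/added/removed/deleted/
    # unknown/ignored/clean fields, e.g.:
    #
    #   st = repo.status()
    #   for f in st.modified:
    #       repo.ui.write(b'M %s\n' % f)
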
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

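    # Illustrative sketch (not part of the upstream source): an extension
    # registering a post-dirstate-status callback; the callback name is
    # hypothetical.
    #
    #   def fixup(wctx, status):
    #       # runs under wlock once status fixups are complete
    #       wctx.repo().ui.debug(b'%d files modified\n' % len(status.modified))
    #
    #   repo.addpostdsstatus(fixup)
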
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

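    # Illustrative sketch (not part of the upstream source): printing the open
    # heads of the 'default' branch as short hashes, using short() which is
    # already imported from .node at the top of this module.
    #
    #   for h in repo.branchheads(b'default'):
    #       repo.ui.write(b'%s\n' % short(h))
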
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

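    # Illustrative note (not part of the upstream source): the loop above
    # samples nodes along the first-parent chain from top towards bottom at
    # exponentially growing distances. With i counting steps and f doubling
    # at each hit, the nodes kept are those 1, 2, 4, 8, ... steps away from
    # top, so each returned list stays logarithmic in the chain length.
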
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance. The registered hooks are called with
        a pushop (carrying repo, remote, and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

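    # Illustrative sketch (not part of the upstream source): 'bookmarks' and
    # 'phases' are typical pushkey namespaces. For bookmarks, old/new are hex
    # nodes and an empty old value means "create":
    #
    #   ok = repo.pushkey(b'bookmarks', b'mybook', b'', hex(newnode))
    #
    # The prepushkey hook can veto the update; the pushkey hook fires once
    # the enclosing lock is released.
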
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

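# Illustrative note (not part of the upstream source): undoname() maps a
# transaction journal file to the matching undo file kept for rollback, e.g.
#
#   undoname('.hg/store/journal')            -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
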
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('format', 'revlog-compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'format.revlog-compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    elif compengine == 'zstd':
        requirements.add('revlog-compression-zstd')
    elif compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    if ui.configbool('format', 'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    return requirements

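# Illustrative note (not part of the upstream source): with stock settings of
# this era, newreporequirements() yields roughly
#
#   {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta',
#    'sparserevlog'}
#
# which is what a fresh `hg init` records in .hg/requires.
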
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

3130 def createrepository(ui, path, createopts=None):
3179 def createrepository(ui, path, createopts=None):
3131 """Create a new repository in a vfs.
3180 """Create a new repository in a vfs.
3132
3181
3133 ``path`` path to the new repo's working directory.
3182 ``path`` path to the new repo's working directory.
3134 ``createopts`` options for the new repository.
3183 ``createopts`` options for the new repository.
3135
3184
3136 The following keys for ``createopts`` are recognized:
3185 The following keys for ``createopts`` are recognized:
3137
3186
3138 backend
3187 backend
3139 The storage backend to use.
3188 The storage backend to use.
3140 lfs
3189 lfs
3141 Repository will be created with ``lfs`` requirement. The lfs extension
3190 Repository will be created with ``lfs`` requirement. The lfs extension
3142 will automatically be loaded when the repository is accessed.
3191 will automatically be loaded when the repository is accessed.
3143 narrowfiles
3192 narrowfiles
3144 Set up repository to support narrow file storage.
3193 Set up repository to support narrow file storage.
3145 sharedrepo
3194 sharedrepo
3146 Repository object from which storage should be shared.
3195 Repository object from which storage should be shared.
3147 sharedrelative
3196 sharedrelative
3148 Boolean indicating if the path to the shared repo should be
3197 Boolean indicating if the path to the shared repo should be
3149 stored as relative. By default, the pointer to the "parent" repo
3198 stored as relative. By default, the pointer to the "parent" repo
3150 is stored as an absolute path.
3199 is stored as an absolute path.
3151 shareditems
3200 shareditems
3152 Set of items to share to the new repository (in addition to storage).
3201 Set of items to share to the new repository (in addition to storage).
3153 shallowfilestore
3202 shallowfilestore
3154 Indicates that storage for files should be shallow (not all ancestor
3203 Indicates that storage for files should be shallow (not all ancestor
3155 revisions are known).
3204 revisions are known).
3156 """
3205 """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

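For orientation, here is a minimal sketch (not part of this changeset) of how
createrepository() might be driven; ui.load() is the usual entry point for
building a ui instance, while the target paths and the commented-out option
are made-up examples:

from mercurial import ui as uimod, localrepo

# Load a ui instance from the user's configuration.
myui = uimod.ui.load()

# Plain repository creation; the requirements are derived from the ui
# configuration via newreporequirements() above.
localrepo.createrepository(b'/tmp/newrepo', myui)

# An unrecognized creation option would hit the error.Abort branch above
# ("unable to create repository because of unknown creation option"):
# localrepo.createrepository(b'/tmp/other', myui,
#                            createopts={'no-such-option': True})
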
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
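
The class-swap trick above is self-contained enough to demonstrate outside
Mercurial. Here is a standalone sketch of the same technique; Thing, poison()
and Poisoned are illustrative names, not Mercurial API:

class Thing(object):
    def close(self):
        print('closed')

def poison(obj):
    # Illustrative re-implementation of the poisoning strategy above.
    class Poisoned(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)
            raise RuntimeError('object used after poisoning')

        def close(self):
            pass

    # object.__setattr__ bypasses any __setattr__ override on the class
    # (such as the one a repoview installs), just as poisonrepository()
    # does above.
    object.__setattr__(obj, '__class__', Poisoned)

t = Thing()
t.close()    # prints 'closed'
poison(t)
t.close()    # still allowed, now a no-op
# Any other access, e.g. t.anything, raises RuntimeError.
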
@@ -1,245 +1,246 @@
==================================
Test corner cases around bookmarks
==================================

This test file is meant to gather bookmark tests that are specific enough
not to find a place elsewhere.

Test bookmark/changelog race condition
======================================

The data from the bookmarks file are filtered to contain only bookmarks whose
nodes are known to the changelog. If the cache invalidation between these two
reads goes wrong, bookmarks can be dropped.

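As an illustration of that mechanism, here is an editorial sketch (not part of
the test; the one-node-one-name line format and all names in it are
assumptions) of the kind of filtering described above:

def filter_bookmarks(lines, has_node):
    # Keep only bookmarks whose node the (possibly stale) changelog knows.
    books = {}
    for line in lines:
        node, name = line.rstrip(b'\n').split(b' ', 1)
        if has_node(node):
            books[name] = node
        # else: the bookmark is silently dropped -- with a stale changelog
        # view, this is exactly where a freshly pushed bookmark can be lost
    return books

known = {b'6569b5a81c7e'}  # stale view: book-B's node is not yet visible
lines = [b'6569b5a81c7e book-A\n', b'f26c3b5167d1 book-B\n']
print(sorted(filter_bookmarks(lines, known.__contains__)))
# -> [b'book-A']  (book-B dropped, mirroring the race)
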
global setup
------------

  $ cat >> $HGRCPATH << EOF
  > [ui]
  > ssh = "$PYTHON" "$TESTDIR/dummyssh"
  > [server]
  > concurrent-push-mode=check-related
  > EOF

Setup
-----

initial repository setup

  $ hg init bookrace-server
  $ cd bookrace-server
  $ echo a > a
  $ hg add a
  $ hg commit -m root
  $ echo a >> a
  $ hg bookmark book-A
  $ hg commit -m A0
  $ hg up 'desc(root)'
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (leaving bookmark book-A)
  $ echo b > b
  $ hg add b
  $ hg bookmark book-B
  $ hg commit -m B0
  created new head
  $ hg up null
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
  (leaving bookmark book-B)
  $ hg phase --public --rev 'all()'
  $ hg log -G
  o  changeset:   2:c79985706978
  |  bookmark:    book-B
  |  tag:         tip
  |  parent:      0:6569b5a81c7e
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     B0
  |
  | o  changeset:   1:39c28d785860
  |/   bookmark:    book-A
  |    user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     A0
  |
  o  changeset:   0:6569b5a81c7e
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     root

  $ hg book
     book-A                    1:39c28d785860
     book-B                    2:c79985706978
  $ cd ..

Add a new changeset on each bookmark in distinct clones

  $ hg clone ssh://user@dummy/bookrace-server client-A
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 3 changesets with 3 changes to 2 files (+1 heads)
  new changesets 6569b5a81c7e:c79985706978
  updating to branch default
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R client-A update book-A
  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
  (activating bookmark book-A)
  $ echo a >> client-A/a
  $ hg -R client-A commit -m A1
  $ hg clone ssh://user@dummy/bookrace-server client-B
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 3 changesets with 3 changes to 2 files (+1 heads)
  new changesets 6569b5a81c7e:c79985706978
  updating to branch default
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R client-B update book-B
  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (activating bookmark book-B)
  $ echo b >> client-B/b
  $ hg -R client-B commit -m B1

extension to reproduce the race
-------------------------------

If two processes are pushing, we want to make sure the following happens:

* process A reads the changelog
* process B does its full push
* process A reads the bookmarks
* process A proceeds with the rest of its push

We build a server-side extension for this purpose:

  $ cat > bookrace.py << EOF
  > import atexit
  > import os
  > import time
  > from mercurial import error, extensions, bookmarks
  >
  > def wait(repo):
  >     # Block the raced push on its first bookmark access until the
  >     # concurrent push signals that it is fully done.
  >     if not os.path.exists('push-A-started'):
  >         assert repo._currentlock(repo._lockref) is None
  >         assert repo._currentlock(repo._wlockref) is None
  >         repo.ui.status(b'setting raced push up\n')
  >         with open('push-A-started', 'w'):
  >             pass
  >     clock = 300
  >     while not os.path.exists('push-B-done'):
  >         clock -= 1
  >         if clock <= 0:
  >             raise error.Abort("race scenario timed out")
  >         time.sleep(0.1)
  >
  > def reposetup(ui, repo):
  >     # Delay the first bookmark read so the other push can run in between.
  >     class racedrepo(repo.__class__):
  >         @property
  >         def _bookmarks(self):
  >             wait(self)
  >             return super(racedrepo, self)._bookmarks
  >     repo.__class__ = racedrepo
  >
  >     def e():
  >         with open('push-A-done', 'w'):
  >             pass
  >     atexit.register(e)
  > EOF

Actual test
-----------

Start the raced push.

  $ cat >> bookrace-server/.hg/hgrc << EOF
  > [extensions]
  > bookrace=$TESTTMP/bookrace.py
  > EOF
  $ hg push -R client-A -r book-A > push-output.txt 2>&1 &

Wait up to 30 seconds for that push to start.

  $ clock=30
  $ while [ ! -f push-A-started ] && [ $clock -gt 0 ] ; do
  >   clock=`expr $clock - 1`
  >   sleep 1
  > done

Do the other push.

  $ cat >> bookrace-server/.hg/hgrc << EOF
  > [extensions]
  > bookrace=!
  > EOF

  $ hg push -R client-B -r book-B
  pushing to ssh://user@dummy/bookrace-server
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  updating bookmark book-B

Signal the raced push that we are done (it waits up to 30 seconds).

  $ touch push-B-done

Wait for the raced push to finish (with the remainder of the initial 30
seconds).

  $ while [ ! -f push-A-done ] && [ $clock -gt 0 ] ; do
  >   clock=`expr $clock - 1`
  >   sleep 1
  > done

Check raced push output.

  $ cat push-output.txt
  pushing to ssh://user@dummy/bookrace-server
  searching for changes
+ remote has heads on branch 'default' that are not known locally: f26c3b5167d1
  remote: setting raced push up
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  updating bookmark book-A

Check result of the push.

  $ hg -R bookrace-server log -G
  o  changeset:   4:9ce3b28c16de
  |  bookmark:    book-A
  |  tag:         tip
  |  parent:      1:39c28d785860
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     A1
  |
  | o  changeset:   3:f26c3b5167d1
- | |  bookmark:    book-B (false !)
+ | |  bookmark:    book-B
  | |  user:        test
  | |  date:        Thu Jan 01 00:00:00 1970 +0000
  | |  summary:     B1
  | |
  | o  changeset:   2:c79985706978
  | |  parent:      0:6569b5a81c7e
  | |  user:        test
  | |  date:        Thu Jan 01 00:00:00 1970 +0000
  | |  summary:     B0
  | |
  o |  changeset:   1:39c28d785860
  |/   user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     A0
  |
  o  changeset:   0:6569b5a81c7e
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     root

  $ hg -R bookrace-server book
     book-A                    4:9ce3b28c16de
-    book-B                    3:f26c3b5167d1 (false !)
+    book-B                    3:f26c3b5167d1