bookmarks: actual fix for race condition deleting bookmark...
marmoute
r43096:044045dc stable
@@ -1,3161 +1,3210 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""
    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        for path, location in pathsandlocations:
            _cachedfiles.add((path, location))

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == 'plain':
            return obj.vfs.join(fname)
        else:
            if location != '':
                raise error.ProgrammingError('unexpected location: %s' %
                                             location)
            return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

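# Illustrative sketch (not part of the upstream source): isfilecached() lets
# callers peek at a filecache-ed property without forcing it to be computed.
# A hypothetical caller could look like:
#
#   cl, cached = isfilecached(repo, 'changelog')
#   if cached:
#       # the changelog is already loaded; reuse it without triggering a read
#       heads = cl.heads()
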
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

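# Illustrative sketch (not part of the upstream source): command executors are
# used as context managers, and callcommand() returns a future. For the local
# executor the future is already resolved when callcommand() returns. A
# hypothetical caller could look like:
#
#   with peer.commandexecutor() as executor:
#       f = executor.callcommand(b'lookup', {b'key': b'tip'})
#   node = f.result()
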
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

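# Illustrative sketch (not part of the upstream source): an extension that adds
# its own requirement can register a setup function here so that repositories
# carrying that requirement remain openable while the extension is loaded. The
# requirement name below is hypothetical:
#
#   def featuresetup(ui, supported):
#       supported |= {b'exp-myfeature'}
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)
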
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')


    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

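# Illustrative sketch (not part of the upstream source): as the docstring above
# notes, extensions may monkeypatch loadhgrc() to pull configuration from other
# sources. A hypothetical wrapper using extensions.wrapfunction(), reading an
# extra (hypothetical) config file next to hgrc:
#
#   def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#       loaded = orig(ui, wdirvfs, hgvfs, requirements)
#       try:
#           ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#           loaded = True
#       except IOError:
#           pass
#       return loaded
#
#   extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
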
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == 'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

753 def resolverevlogstorevfsoptions(ui, requirements, features):
753 def resolverevlogstorevfsoptions(ui, requirements, features):
754 """Resolve opener options specific to revlogs."""
754 """Resolve opener options specific to revlogs."""
755
755
756 options = {}
756 options = {}
757 options[b'flagprocessors'] = {}
757 options[b'flagprocessors'] = {}
758
758
759 if b'revlogv1' in requirements:
759 if b'revlogv1' in requirements:
760 options[b'revlogv1'] = True
760 options[b'revlogv1'] = True
761 if REVLOGV2_REQUIREMENT in requirements:
761 if REVLOGV2_REQUIREMENT in requirements:
762 options[b'revlogv2'] = True
762 options[b'revlogv2'] = True
763
763
764 if b'generaldelta' in requirements:
764 if b'generaldelta' in requirements:
765 options[b'generaldelta'] = True
765 options[b'generaldelta'] = True
766
766
767 # experimental config: format.chunkcachesize
767 # experimental config: format.chunkcachesize
768 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
768 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
769 if chunkcachesize is not None:
769 if chunkcachesize is not None:
770 options[b'chunkcachesize'] = chunkcachesize
770 options[b'chunkcachesize'] = chunkcachesize
771
771
772 deltabothparents = ui.configbool(b'storage',
772 deltabothparents = ui.configbool(b'storage',
773 b'revlog.optimize-delta-parent-choice')
773 b'revlog.optimize-delta-parent-choice')
774 options[b'deltabothparents'] = deltabothparents
774 options[b'deltabothparents'] = deltabothparents
775
775
776 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
776 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
777 lazydeltabase = False
777 lazydeltabase = False
778 if lazydelta:
778 if lazydelta:
779 lazydeltabase = ui.configbool(b'storage',
779 lazydeltabase = ui.configbool(b'storage',
780 b'revlog.reuse-external-delta-parent')
780 b'revlog.reuse-external-delta-parent')
781 if lazydeltabase is None:
781 if lazydeltabase is None:
782 lazydeltabase = not scmutil.gddeltaconfig(ui)
782 lazydeltabase = not scmutil.gddeltaconfig(ui)
783 options[b'lazydelta'] = lazydelta
783 options[b'lazydelta'] = lazydelta
784 options[b'lazydeltabase'] = lazydeltabase
784 options[b'lazydeltabase'] = lazydeltabase
785
785
786 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
786 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
787 if 0 <= chainspan:
787 if 0 <= chainspan:
788 options[b'maxdeltachainspan'] = chainspan
788 options[b'maxdeltachainspan'] = chainspan
789
789
790 mmapindexthreshold = ui.configbytes(b'experimental',
790 mmapindexthreshold = ui.configbytes(b'experimental',
791 b'mmapindexthreshold')
791 b'mmapindexthreshold')
792 if mmapindexthreshold is not None:
792 if mmapindexthreshold is not None:
793 options[b'mmapindexthreshold'] = mmapindexthreshold
793 options[b'mmapindexthreshold'] = mmapindexthreshold
794
794
795 withsparseread = ui.configbool(b'experimental', b'sparse-read')
795 withsparseread = ui.configbool(b'experimental', b'sparse-read')
796 srdensitythres = float(ui.config(b'experimental',
796 srdensitythres = float(ui.config(b'experimental',
797 b'sparse-read.density-threshold'))
797 b'sparse-read.density-threshold'))
798 srmingapsize = ui.configbytes(b'experimental',
798 srmingapsize = ui.configbytes(b'experimental',
799 b'sparse-read.min-gap-size')
799 b'sparse-read.min-gap-size')
800 options[b'with-sparse-read'] = withsparseread
800 options[b'with-sparse-read'] = withsparseread
801 options[b'sparse-read-density-threshold'] = srdensitythres
801 options[b'sparse-read-density-threshold'] = srdensitythres
802 options[b'sparse-read-min-gap-size'] = srmingapsize
802 options[b'sparse-read-min-gap-size'] = srmingapsize
803
803
804 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
804 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
805 options[b'sparse-revlog'] = sparserevlog
805 options[b'sparse-revlog'] = sparserevlog
806 if sparserevlog:
806 if sparserevlog:
807 options[b'generaldelta'] = True
807 options[b'generaldelta'] = True
808
808
809 maxchainlen = None
809 maxchainlen = None
810 if sparserevlog:
810 if sparserevlog:
811 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
811 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
812 # experimental config: format.maxchainlen
812 # experimental config: format.maxchainlen
813 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
813 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
814 if maxchainlen is not None:
814 if maxchainlen is not None:
815 options[b'maxchainlen'] = maxchainlen
815 options[b'maxchainlen'] = maxchainlen
816
816
817 for r in requirements:
817 for r in requirements:
818 # we allow multiple compression engine requirement to co-exist because
818 # we allow multiple compression engine requirement to co-exist because
819 # strickly speaking, revlog seems to support mixed compression style.
819 # strickly speaking, revlog seems to support mixed compression style.
820 #
820 #
821 # The compression used for new entries will be "the last one"
821 # The compression used for new entries will be "the last one"
822 prefix = r.startswith
822 prefix = r.startswith
823 if prefix('revlog-compression-') or prefix('exp-compression-'):
823 if prefix('revlog-compression-') or prefix('exp-compression-'):
824 options[b'compengine'] = r.split('-', 2)[2]
824 options[b'compengine'] = r.split('-', 2)[2]
825
825
826 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
826 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
827 if options[b'zlib.level'] is not None:
827 if options[b'zlib.level'] is not None:
828 if not (0 <= options[b'zlib.level'] <= 9):
828 if not (0 <= options[b'zlib.level'] <= 9):
829 msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
829 msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
830 raise error.Abort(msg % options[b'zlib.level'])
830 raise error.Abort(msg % options[b'zlib.level'])
831 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
831 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
832 if options[b'zstd.level'] is not None:
832 if options[b'zstd.level'] is not None:
833 if not (0 <= options[b'zstd.level'] <= 22):
833 if not (0 <= options[b'zstd.level'] <= 22):
834 msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
834 msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
835 raise error.Abort(msg % options[b'zstd.level'])
835 raise error.Abort(msg % options[b'zstd.level'])
836
836
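# Example (illustrative hgrc snippet) for the two settings validated above;
# values outside 0-9 for zlib or 0-22 for zstd abort with the messages above:
#   [storage]
#   revlog.zlib.level = 6
#   revlog.zstd.level = 3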
837 if repository.NARROW_REQUIREMENT in requirements:
837 if repository.NARROW_REQUIREMENT in requirements:
838 options[b'enableellipsis'] = True
838 options[b'enableellipsis'] = True
839
839
840 return options
840 return options
841
841
842 def makemain(**kwargs):
842 def makemain(**kwargs):
843 """Produce a type conforming to ``ilocalrepositorymain``."""
843 """Produce a type conforming to ``ilocalrepositorymain``."""
844 return localrepository
844 return localrepository
845
845
846 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
846 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
847 class revlogfilestorage(object):
847 class revlogfilestorage(object):
848 """File storage when using revlogs."""
848 """File storage when using revlogs."""
849
849
850 def file(self, path):
850 def file(self, path):
851 if path[0] == b'/':
851 if path[0] == b'/':
852 path = path[1:]
852 path = path[1:]
853
853
854 return filelog.filelog(self.svfs, path)
854 return filelog.filelog(self.svfs, path)
855
855
856 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
856 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
857 class revlognarrowfilestorage(object):
857 class revlognarrowfilestorage(object):
858 """File storage when using revlogs and narrow files."""
858 """File storage when using revlogs and narrow files."""
859
859
860 def file(self, path):
860 def file(self, path):
861 if path[0] == b'/':
861 if path[0] == b'/':
862 path = path[1:]
862 path = path[1:]
863
863
864 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
864 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
865
865
866 def makefilestorage(requirements, features, **kwargs):
866 def makefilestorage(requirements, features, **kwargs):
867 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
867 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
868 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
868 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
869 features.add(repository.REPO_FEATURE_STREAM_CLONE)
869 features.add(repository.REPO_FEATURE_STREAM_CLONE)
870
870
871 if repository.NARROW_REQUIREMENT in requirements:
871 if repository.NARROW_REQUIREMENT in requirements:
872 return revlognarrowfilestorage
872 return revlognarrowfilestorage
873 else:
873 else:
874 return revlogfilestorage
874 return revlogfilestorage
875
875
876 # List of repository interfaces and factory functions for them. Each
876 # List of repository interfaces and factory functions for them. Each
877 # will be called in order during ``makelocalrepository()`` to iteratively
877 # will be called in order during ``makelocalrepository()`` to iteratively
878 # derive the final type for a local repository instance. We capture the
878 # derive the final type for a local repository instance. We capture the
879 # function as a lambda so we don't hold a reference and the module-level
879 # function as a lambda so we don't hold a reference and the module-level
880 # functions can be wrapped.
880 # functions can be wrapped.
881 REPO_INTERFACES = [
881 REPO_INTERFACES = [
882 (repository.ilocalrepositorymain, lambda: makemain),
882 (repository.ilocalrepositorymain, lambda: makemain),
883 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
883 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
884 ]
884 ]
885
885
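# Sketch of the wrapping the comment above alludes to (hypothetical
# extension code, not something this module does itself): because the
# factories are resolved lazily through the lambdas, rebinding the
# module-level name is enough for the new factory to be picked up:
#   def wrappedmakemain(**kwargs):
#       return makemain(**kwargs)   # or return a derived class
#   localrepo.makemain = wrappedmakemain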
886 @interfaceutil.implementer(repository.ilocalrepositorymain)
886 @interfaceutil.implementer(repository.ilocalrepositorymain)
887 class localrepository(object):
887 class localrepository(object):
888 """Main class for representing local repositories.
888 """Main class for representing local repositories.
889
889
890 All local repositories are instances of this class.
890 All local repositories are instances of this class.
891
891
892 Constructed on its own, instances of this class are not usable as
892 Constructed on its own, instances of this class are not usable as
893 repository objects. To obtain a usable repository object, call
893 repository objects. To obtain a usable repository object, call
894 ``hg.repository()``, ``localrepo.instance()``, or
894 ``hg.repository()``, ``localrepo.instance()``, or
895 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
895 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
896 ``instance()`` adds support for creating new repositories.
896 ``instance()`` adds support for creating new repositories.
897 ``hg.repository()`` adds more extension integration, including calling
897 ``hg.repository()`` adds more extension integration, including calling
898 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
898 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
899 used.
899 used.
900 """
900 """
901
901
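# Typical way to obtain a usable instance, per the docstring above
# (illustrative call; the path value is hypothetical):
#   from mercurial import hg, ui as uimod
#   repo = hg.repository(uimod.ui.load(), b'/path/to/repo')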
902 # obsolete experimental requirements:
902 # obsolete experimental requirements:
903 # - manifestv2: An experimental new manifest format that allowed
903 # - manifestv2: An experimental new manifest format that allowed
904 # for stem compression of long paths. The experiment ended up not
905 # being successful (repository sizes went up due to worse delta
906 # chains), and the code was deleted in 4.6.
906 # chains), and the code was deleted in 4.6.
907 supportedformats = {
907 supportedformats = {
908 'revlogv1',
908 'revlogv1',
909 'generaldelta',
909 'generaldelta',
910 'treemanifest',
910 'treemanifest',
911 REVLOGV2_REQUIREMENT,
911 REVLOGV2_REQUIREMENT,
912 SPARSEREVLOG_REQUIREMENT,
912 SPARSEREVLOG_REQUIREMENT,
913 }
913 }
914 _basesupported = supportedformats | {
914 _basesupported = supportedformats | {
915 'store',
915 'store',
916 'fncache',
916 'fncache',
917 'shared',
917 'shared',
918 'relshared',
918 'relshared',
919 'dotencode',
919 'dotencode',
920 'exp-sparse',
920 'exp-sparse',
921 'internal-phase'
921 'internal-phase'
922 }
922 }
923
923
924 # list of prefixes for files which can be written without 'wlock'
925 # Extensions should extend this list when needed
926 _wlockfreeprefix = {
927 # We might consider requiring 'wlock' for the next
928 # two, but pretty much all the existing code assumes
929 # wlock is not needed so we keep them excluded for
930 # now.
931 'hgrc',
932 'requires',
933 # XXX cache is a complicated business; someone
934 # should investigate this in depth at some point
935 'cache/',
936 # XXX shouldn't the dirstate be covered by the wlock?
937 'dirstate',
938 # XXX bisect was still a bit too messy at the time
939 # this changeset was introduced. Someone should fix
940 # the remaining bit and drop this line
941 'bisect.state',
942 }
942 }
943
943
944 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
944 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
945 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
945 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
946 features, intents=None):
946 features, intents=None):
947 """Create a new local repository instance.
947 """Create a new local repository instance.
948
948
949 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
949 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
950 or ``localrepo.makelocalrepository()`` for obtaining a new repository
950 or ``localrepo.makelocalrepository()`` for obtaining a new repository
951 object.
951 object.
952
952
953 Arguments:
953 Arguments:
954
954
955 baseui
955 baseui
956 ``ui.ui`` instance that ``ui`` argument was based off of.
956 ``ui.ui`` instance that ``ui`` argument was based off of.
957
957
958 ui
958 ui
959 ``ui.ui`` instance for use by the repository.
959 ``ui.ui`` instance for use by the repository.
960
960
961 origroot
961 origroot
962 ``bytes`` path to working directory root of this repository.
962 ``bytes`` path to working directory root of this repository.
963
963
964 wdirvfs
964 wdirvfs
965 ``vfs.vfs`` rooted at the working directory.
965 ``vfs.vfs`` rooted at the working directory.
966
966
967 hgvfs
967 hgvfs
968 ``vfs.vfs`` rooted at .hg/
968 ``vfs.vfs`` rooted at .hg/
969
969
970 requirements
970 requirements
971 ``set`` of bytestrings representing repository opening requirements.
971 ``set`` of bytestrings representing repository opening requirements.
972
972
973 supportedrequirements
973 supportedrequirements
974 ``set`` of bytestrings representing repository requirements that we
974 ``set`` of bytestrings representing repository requirements that we
975 know how to open. May be a superset of ``requirements``.
976
976
977 sharedpath
977 sharedpath
978 ``bytes`` path to the storage base directory. Points to a
979 ``.hg/`` directory somewhere.
979 ``.hg/`` directory somewhere.
980
980
981 store
981 store
982 ``store.basicstore`` (or derived) instance providing access to
982 ``store.basicstore`` (or derived) instance providing access to
983 versioned storage.
983 versioned storage.
984
984
985 cachevfs
985 cachevfs
986 ``vfs.vfs`` used for cache files.
986 ``vfs.vfs`` used for cache files.
987
987
988 wcachevfs
988 wcachevfs
989 ``vfs.vfs`` used for cache files related to the working copy.
989 ``vfs.vfs`` used for cache files related to the working copy.
990
990
991 features
991 features
992 ``set`` of bytestrings defining features/capabilities of this
992 ``set`` of bytestrings defining features/capabilities of this
993 instance.
993 instance.
994
994
995 intents
995 intents
996 ``set`` of system strings indicating what this repo will be used
996 ``set`` of system strings indicating what this repo will be used
997 for.
997 for.
998 """
998 """
999 self.baseui = baseui
999 self.baseui = baseui
1000 self.ui = ui
1000 self.ui = ui
1001 self.origroot = origroot
1001 self.origroot = origroot
1002 # vfs rooted at working directory.
1002 # vfs rooted at working directory.
1003 self.wvfs = wdirvfs
1003 self.wvfs = wdirvfs
1004 self.root = wdirvfs.base
1004 self.root = wdirvfs.base
1005 # vfs rooted at .hg/. Used to access most non-store paths.
1005 # vfs rooted at .hg/. Used to access most non-store paths.
1006 self.vfs = hgvfs
1006 self.vfs = hgvfs
1007 self.path = hgvfs.base
1007 self.path = hgvfs.base
1008 self.requirements = requirements
1008 self.requirements = requirements
1009 self.supported = supportedrequirements
1009 self.supported = supportedrequirements
1010 self.sharedpath = sharedpath
1010 self.sharedpath = sharedpath
1011 self.store = store
1011 self.store = store
1012 self.cachevfs = cachevfs
1012 self.cachevfs = cachevfs
1013 self.wcachevfs = wcachevfs
1013 self.wcachevfs = wcachevfs
1014 self.features = features
1014 self.features = features
1015
1015
1016 self.filtername = None
1016 self.filtername = None
1017
1017
1018 if (self.ui.configbool('devel', 'all-warnings') or
1018 if (self.ui.configbool('devel', 'all-warnings') or
1019 self.ui.configbool('devel', 'check-locks')):
1019 self.ui.configbool('devel', 'check-locks')):
1020 self.vfs.audit = self._getvfsward(self.vfs.audit)
1020 self.vfs.audit = self._getvfsward(self.vfs.audit)
1021 # A list of callbacks to shape the phase if no data were found.
1022 # Callbacks are in the form: func(repo, roots) --> processed root.
1023 # This list is to be filled by extensions during repo setup
1024 self._phasedefaults = []
1024 self._phasedefaults = []
1025
1025
1026 color.setup(self.ui)
1026 color.setup(self.ui)
1027
1027
1028 self.spath = self.store.path
1028 self.spath = self.store.path
1029 self.svfs = self.store.vfs
1029 self.svfs = self.store.vfs
1030 self.sjoin = self.store.join
1030 self.sjoin = self.store.join
1031 if (self.ui.configbool('devel', 'all-warnings') or
1031 if (self.ui.configbool('devel', 'all-warnings') or
1032 self.ui.configbool('devel', 'check-locks')):
1032 self.ui.configbool('devel', 'check-locks')):
1033 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
1033 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
1034 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1034 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1035 else: # standard vfs
1035 else: # standard vfs
1036 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1036 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1037
1037
1038 self._dirstatevalidatewarned = False
1038 self._dirstatevalidatewarned = False
1039
1039
1040 self._branchcaches = branchmap.BranchMapCache()
1040 self._branchcaches = branchmap.BranchMapCache()
1041 self._revbranchcache = None
1041 self._revbranchcache = None
1042 self._filterpats = {}
1042 self._filterpats = {}
1043 self._datafilters = {}
1043 self._datafilters = {}
1044 self._transref = self._lockref = self._wlockref = None
1044 self._transref = self._lockref = self._wlockref = None
1045
1045
1046 # A cache for various files under .hg/ that tracks file changes,
1046 # A cache for various files under .hg/ that tracks file changes,
1047 # (used by the filecache decorator)
1047 # (used by the filecache decorator)
1048 #
1048 #
1049 # Maps a property name to its util.filecacheentry
1049 # Maps a property name to its util.filecacheentry
1050 self._filecache = {}
1050 self._filecache = {}
1051
1051
1052 # holds sets of revisions to be filtered
1053 # should be cleared when something might have changed the filter value:
1054 # - new changesets,
1055 # - phase change,
1056 # - new obsolescence markers,
1057 # - working directory parent change,
1058 # - bookmark changes
1059 self.filteredrevcache = {}
1059 self.filteredrevcache = {}
1060
1060
1061 # post-dirstate-status hooks
1061 # post-dirstate-status hooks
1062 self._postdsstatus = []
1062 self._postdsstatus = []
1063
1063
1064 # generic mapping between names and nodes
1064 # generic mapping between names and nodes
1065 self.names = namespaces.namespaces()
1065 self.names = namespaces.namespaces()
1066
1066
1067 # Key to signature value.
1067 # Key to signature value.
1068 self._sparsesignaturecache = {}
1068 self._sparsesignaturecache = {}
1069 # Signature to cached matcher instance.
1069 # Signature to cached matcher instance.
1070 self._sparsematchercache = {}
1070 self._sparsematchercache = {}
1071
1071
1072 def _getvfsward(self, origfunc):
1072 def _getvfsward(self, origfunc):
1073 """build a ward for self.vfs"""
1073 """build a ward for self.vfs"""
1074 rref = weakref.ref(self)
1074 rref = weakref.ref(self)
1075 def checkvfs(path, mode=None):
1075 def checkvfs(path, mode=None):
1076 ret = origfunc(path, mode=mode)
1076 ret = origfunc(path, mode=mode)
1077 repo = rref()
1077 repo = rref()
1078 if (repo is None
1078 if (repo is None
1079 or not util.safehasattr(repo, '_wlockref')
1079 or not util.safehasattr(repo, '_wlockref')
1080 or not util.safehasattr(repo, '_lockref')):
1080 or not util.safehasattr(repo, '_lockref')):
1081 return
1081 return
1082 if mode in (None, 'r', 'rb'):
1082 if mode in (None, 'r', 'rb'):
1083 return
1083 return
1084 if path.startswith(repo.path):
1084 if path.startswith(repo.path):
1085 # truncate name relative to the repository (.hg)
1085 # truncate name relative to the repository (.hg)
1086 path = path[len(repo.path) + 1:]
1086 path = path[len(repo.path) + 1:]
1087 if path.startswith('cache/'):
1087 if path.startswith('cache/'):
1088 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1088 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1089 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1089 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1090 if path.startswith('journal.') or path.startswith('undo.'):
1090 if path.startswith('journal.') or path.startswith('undo.'):
1091 # journal is covered by 'lock'
1091 # journal is covered by 'lock'
1092 if repo._currentlock(repo._lockref) is None:
1092 if repo._currentlock(repo._lockref) is None:
1093 repo.ui.develwarn('write with no lock: "%s"' % path,
1093 repo.ui.develwarn('write with no lock: "%s"' % path,
1094 stacklevel=3, config='check-locks')
1094 stacklevel=3, config='check-locks')
1095 elif repo._currentlock(repo._wlockref) is None:
1095 elif repo._currentlock(repo._wlockref) is None:
1096 # rest of vfs files are covered by 'wlock'
1096 # rest of vfs files are covered by 'wlock'
1097 #
1097 #
1098 # exclude special files
1098 # exclude special files
1099 for prefix in self._wlockfreeprefix:
1099 for prefix in self._wlockfreeprefix:
1100 if path.startswith(prefix):
1100 if path.startswith(prefix):
1101 return
1101 return
1102 repo.ui.develwarn('write with no wlock: "%s"' % path,
1102 repo.ui.develwarn('write with no wlock: "%s"' % path,
1103 stacklevel=3, config='check-locks')
1103 stacklevel=3, config='check-locks')
1104 return ret
1104 return ret
1105 return checkvfs
1105 return checkvfs
1106
1106
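# Note: the ward built above is only installed when developer warnings
# are enabled, e.g. with an hgrc such as (illustrative snippet):
#   [devel]
#   all-warnings = true
#   check-locks = true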
1107 def _getsvfsward(self, origfunc):
1107 def _getsvfsward(self, origfunc):
1108 """build a ward for self.svfs"""
1108 """build a ward for self.svfs"""
1109 rref = weakref.ref(self)
1109 rref = weakref.ref(self)
1110 def checksvfs(path, mode=None):
1110 def checksvfs(path, mode=None):
1111 ret = origfunc(path, mode=mode)
1111 ret = origfunc(path, mode=mode)
1112 repo = rref()
1112 repo = rref()
1113 if repo is None or not util.safehasattr(repo, '_lockref'):
1113 if repo is None or not util.safehasattr(repo, '_lockref'):
1114 return
1114 return
1115 if mode in (None, 'r', 'rb'):
1115 if mode in (None, 'r', 'rb'):
1116 return
1116 return
1117 if path.startswith(repo.sharedpath):
1117 if path.startswith(repo.sharedpath):
1118 # truncate name relative to the repository (.hg)
1118 # truncate name relative to the repository (.hg)
1119 path = path[len(repo.sharedpath) + 1:]
1119 path = path[len(repo.sharedpath) + 1:]
1120 if repo._currentlock(repo._lockref) is None:
1120 if repo._currentlock(repo._lockref) is None:
1121 repo.ui.develwarn('write with no lock: "%s"' % path,
1121 repo.ui.develwarn('write with no lock: "%s"' % path,
1122 stacklevel=4)
1122 stacklevel=4)
1123 return ret
1123 return ret
1124 return checksvfs
1124 return checksvfs
1125
1125
1126 def close(self):
1126 def close(self):
1127 self._writecaches()
1127 self._writecaches()
1128
1128
1129 def _writecaches(self):
1129 def _writecaches(self):
1130 if self._revbranchcache:
1130 if self._revbranchcache:
1131 self._revbranchcache.write()
1131 self._revbranchcache.write()
1132
1132
1133 def _restrictcapabilities(self, caps):
1133 def _restrictcapabilities(self, caps):
1134 if self.ui.configbool('experimental', 'bundle2-advertise'):
1134 if self.ui.configbool('experimental', 'bundle2-advertise'):
1135 caps = set(caps)
1135 caps = set(caps)
1136 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1136 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1137 role='client'))
1137 role='client'))
1138 caps.add('bundle2=' + urlreq.quote(capsblob))
1138 caps.add('bundle2=' + urlreq.quote(capsblob))
1139 return caps
1139 return caps
1140
1140
1141 def _writerequirements(self):
1141 def _writerequirements(self):
1142 scmutil.writerequires(self.vfs, self.requirements)
1142 scmutil.writerequires(self.vfs, self.requirements)
1143
1143
1144 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1145 # self -> auditor -> self._checknested -> self
1146
1146
1147 @property
1147 @property
1148 def auditor(self):
1148 def auditor(self):
1149 # This is only used by context.workingctx.match in order to
1149 # This is only used by context.workingctx.match in order to
1150 # detect files in subrepos.
1150 # detect files in subrepos.
1151 return pathutil.pathauditor(self.root, callback=self._checknested)
1151 return pathutil.pathauditor(self.root, callback=self._checknested)
1152
1152
1153 @property
1153 @property
1154 def nofsauditor(self):
1154 def nofsauditor(self):
1155 # This is only used by context.basectx.match in order to detect
1155 # This is only used by context.basectx.match in order to detect
1156 # files in subrepos.
1156 # files in subrepos.
1157 return pathutil.pathauditor(self.root, callback=self._checknested,
1157 return pathutil.pathauditor(self.root, callback=self._checknested,
1158 realfs=False, cached=True)
1158 realfs=False, cached=True)
1159
1159
1160 def _checknested(self, path):
1160 def _checknested(self, path):
1161 """Determine if path is a legal nested repository."""
1161 """Determine if path is a legal nested repository."""
1162 if not path.startswith(self.root):
1162 if not path.startswith(self.root):
1163 return False
1163 return False
1164 subpath = path[len(self.root) + 1:]
1164 subpath = path[len(self.root) + 1:]
1165 normsubpath = util.pconvert(subpath)
1165 normsubpath = util.pconvert(subpath)
1166
1166
1167 # XXX: Checking against the current working copy is wrong in
1167 # XXX: Checking against the current working copy is wrong in
1168 # the sense that it can reject things like
1168 # the sense that it can reject things like
1169 #
1169 #
1170 # $ hg cat -r 10 sub/x.txt
1170 # $ hg cat -r 10 sub/x.txt
1171 #
1171 #
1172 # if sub/ is no longer a subrepository in the working copy
1172 # if sub/ is no longer a subrepository in the working copy
1173 # parent revision.
1173 # parent revision.
1174 #
1174 #
1175 # However, it can of course also allow things that would have
1175 # However, it can of course also allow things that would have
1176 # been rejected before, such as the above cat command if sub/
1176 # been rejected before, such as the above cat command if sub/
1177 # is a subrepository now, but was a normal directory before.
1177 # is a subrepository now, but was a normal directory before.
1178 # The old path auditor would have rejected by mistake since it
1178 # The old path auditor would have rejected by mistake since it
1179 # panics when it sees sub/.hg/.
1179 # panics when it sees sub/.hg/.
1180 #
1180 #
1181 # All in all, checking against the working copy seems sensible
1181 # All in all, checking against the working copy seems sensible
1182 # since we want to prevent access to nested repositories on
1182 # since we want to prevent access to nested repositories on
1183 # the filesystem *now*.
1183 # the filesystem *now*.
1184 ctx = self[None]
1184 ctx = self[None]
1185 parts = util.splitpath(subpath)
1185 parts = util.splitpath(subpath)
1186 while parts:
1186 while parts:
1187 prefix = '/'.join(parts)
1187 prefix = '/'.join(parts)
1188 if prefix in ctx.substate:
1188 if prefix in ctx.substate:
1189 if prefix == normsubpath:
1189 if prefix == normsubpath:
1190 return True
1190 return True
1191 else:
1191 else:
1192 sub = ctx.sub(prefix)
1192 sub = ctx.sub(prefix)
1193 return sub.checknested(subpath[len(prefix) + 1:])
1193 return sub.checknested(subpath[len(prefix) + 1:])
1194 else:
1194 else:
1195 parts.pop()
1195 parts.pop()
1196 return False
1196 return False
1197
1197
1198 def peer(self):
1198 def peer(self):
1199 return localpeer(self) # not cached to avoid reference cycle
1199 return localpeer(self) # not cached to avoid reference cycle
1200
1200
1201 def unfiltered(self):
1201 def unfiltered(self):
1202 """Return unfiltered version of the repository
1202 """Return unfiltered version of the repository
1203
1203
1204 Intended to be overwritten by filtered repo."""
1204 Intended to be overwritten by filtered repo."""
1205 return self
1205 return self
1206
1206
1207 def filtered(self, name, visibilityexceptions=None):
1207 def filtered(self, name, visibilityexceptions=None):
1208 """Return a filtered version of a repository
1208 """Return a filtered version of a repository
1209
1209
1210 The `name` parameter is the identifier of the requested view. This
1210 The `name` parameter is the identifier of the requested view. This
1211 will return a repoview object set "exactly" to the specified view.
1211 will return a repoview object set "exactly" to the specified view.
1212
1212
1213 This function does not apply recursive filtering to a repository. For
1213 This function does not apply recursive filtering to a repository. For
1214 example calling `repo.filtered("served")` will return a repoview using
1214 example calling `repo.filtered("served")` will return a repoview using
1215 the "served" view, regardless of the initial view used by `repo`.
1215 the "served" view, regardless of the initial view used by `repo`.
1216
1216
1217 In other words, there is always only one level of `repoview` "filtering".
1218 """
1218 """
1219 cls = repoview.newtype(self.unfiltered().__class__)
1219 cls = repoview.newtype(self.unfiltered().__class__)
1220 return cls(self, name, visibilityexceptions)
1220 return cls(self, name, visibilityexceptions)
1221
1221
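# Example (illustrative view names, both provided by repoview):
#   repo.filtered('served')    # hides hidden and secret changesets
#   repo.filtered('visible')   # hides only hidden changesets
# Each call filters the unfiltered repository, never an already
# filtered view, as described in the docstring above.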
1222 @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'),
1222 @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'),
1223 ('00changelog.i', ''))
1223 ('00changelog.i', ''))
1224 def _bookmarks(self):
1224 def _bookmarks(self):
1225 # Since the multiple files involved in the transaction cannot be
1226 # written atomically (with the current repository format), there is a
1227 # race condition here:
1228 #
1229 # 1) changelog content A is read
1230 # 2) an outside transaction updates the changelog to content B
1231 # 3) the outside transaction updates the bookmark file to refer to content B
1232 # 4) the bookmarks file content is read and filtered against changelog-A
1233 #
1234 # When this happens, bookmarks against nodes missing from A are dropped.
1235 #
1236 # Having this happen during a read is not great, but it becomes worse
1237 # when it happens during a write, because the bookmarks to the "unknown"
1238 # nodes will be dropped for good. However, writes happen within locks.
1239 # This locking makes it possible to have a race-free consistent read.
1240 # For this purpose, data read from disk before locking is
1241 # "invalidated" right after the locks are taken. These invalidations are
1242 # "light": the `filecache` mechanism keeps the data in memory and will
1243 # reuse it if the underlying files did not change. Not parsing the
1244 # same data multiple times helps performance.
1245 #
1246 # Unfortunately, in the case described above, the files tracked by the
1247 # bookmarks file cache might not have changed, but the in-memory
1248 # content is still "wrong" because we used an older changelog content
1249 # to process the on-disk data. So after locking, the changelog would be
1250 # refreshed but `_bookmarks` would be preserved.
1251 # Adding `00changelog.i` to the list of tracked files is not
1252 # enough, because at the time we build the content for `_bookmarks` in
1253 # (4), the changelog file has already diverged from the content used
1254 # for loading `changelog` in (1).
1255 #
1256 # To prevent the issue, we force the changelog to be explicitly
1257 # reloaded while computing `_bookmarks`. The data race can still happen
1258 # without the lock (with a narrower window), but it would no longer go
1259 # undetected during the lock-time refresh.
1260 #
1261 # The new schedule is as follows:
1262 #
1263 # 1) the filecache logic detects that `_bookmarks` needs to be computed
1264 # 2) cachestats for `bookmarks` and `changelog` are captured (for bookmarks)
1265 # 3) we force the `changelog` filecache to be tested
1266 # 4) a cachestat for `changelog` is captured (for changelog)
1267 # 5) `_bookmarks` is computed and cached
1268 #
1269 # The step in (3) ensures we have a changelog at least as recent as the
1270 # cachestat computed in (1). As a result, at locking time:
1271 # * if the changelog did not change since (1) -> we can reuse the data
1272 # * otherwise -> the bookmarks get refreshed.
1273 self._refreshchangelog()
1225 return bookmarks.bmstore(self)
1274 return bookmarks.bmstore(self)
1226
1275
1227 def _refreshchangelog(self):
1276 def _refreshchangelog(self):
1228 """make sure the in memory changelog match the on-disk one"""
1277 """make sure the in memory changelog match the on-disk one"""
1229 if ('changelog' in vars(self) and self.currenttransaction() is None):
1278 if ('changelog' in vars(self) and self.currenttransaction() is None):
1230 del self.changelog
1279 del self.changelog
1231
1280
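# Note (sketch of the intended effect, relying on the filecache
# machinery): deleting the cached attribute makes the next access to
# `repo.changelog` recompute the @storecache property from disk:
#   repo._refreshchangelog()
#   cl = repo.changelog   # reloaded if no transaction is in progress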
1232 @property
1281 @property
1233 def _activebookmark(self):
1282 def _activebookmark(self):
1234 return self._bookmarks.active
1283 return self._bookmarks.active
1235
1284
1236 # _phasesets depend on changelog. what we need is to call
1285 # _phasesets depend on changelog. what we need is to call
1237 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1286 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1238 # can't be easily expressed in filecache mechanism.
1287 # can't be easily expressed in filecache mechanism.
1239 @storecache('phaseroots', '00changelog.i')
1288 @storecache('phaseroots', '00changelog.i')
1240 def _phasecache(self):
1289 def _phasecache(self):
1241 return phases.phasecache(self, self._phasedefaults)
1290 return phases.phasecache(self, self._phasedefaults)
1242
1291
1243 @storecache('obsstore')
1292 @storecache('obsstore')
1244 def obsstore(self):
1293 def obsstore(self):
1245 return obsolete.makestore(self.ui, self)
1294 return obsolete.makestore(self.ui, self)
1246
1295
1247 @storecache('00changelog.i')
1296 @storecache('00changelog.i')
1248 def changelog(self):
1297 def changelog(self):
1249 return changelog.changelog(self.svfs,
1298 return changelog.changelog(self.svfs,
1250 trypending=txnutil.mayhavepending(self.root))
1299 trypending=txnutil.mayhavepending(self.root))
1251
1300
1252 @storecache('00manifest.i')
1301 @storecache('00manifest.i')
1253 def manifestlog(self):
1302 def manifestlog(self):
1254 rootstore = manifest.manifestrevlog(self.svfs)
1303 rootstore = manifest.manifestrevlog(self.svfs)
1255 return manifest.manifestlog(self.svfs, self, rootstore,
1304 return manifest.manifestlog(self.svfs, self, rootstore,
1256 self._storenarrowmatch)
1305 self._storenarrowmatch)
1257
1306
1258 @repofilecache('dirstate')
1307 @repofilecache('dirstate')
1259 def dirstate(self):
1308 def dirstate(self):
1260 return self._makedirstate()
1309 return self._makedirstate()
1261
1310
1262 def _makedirstate(self):
1311 def _makedirstate(self):
1263 """Extension point for wrapping the dirstate per-repo."""
1312 """Extension point for wrapping the dirstate per-repo."""
1264 sparsematchfn = lambda: sparse.matcher(self)
1313 sparsematchfn = lambda: sparse.matcher(self)
1265
1314
1266 return dirstate.dirstate(self.vfs, self.ui, self.root,
1315 return dirstate.dirstate(self.vfs, self.ui, self.root,
1267 self._dirstatevalidate, sparsematchfn)
1316 self._dirstatevalidate, sparsematchfn)
1268
1317
1269 def _dirstatevalidate(self, node):
1318 def _dirstatevalidate(self, node):
1270 try:
1319 try:
1271 self.changelog.rev(node)
1320 self.changelog.rev(node)
1272 return node
1321 return node
1273 except error.LookupError:
1322 except error.LookupError:
1274 if not self._dirstatevalidatewarned:
1323 if not self._dirstatevalidatewarned:
1275 self._dirstatevalidatewarned = True
1324 self._dirstatevalidatewarned = True
1276 self.ui.warn(_("warning: ignoring unknown"
1325 self.ui.warn(_("warning: ignoring unknown"
1277 " working parent %s!\n") % short(node))
1326 " working parent %s!\n") % short(node))
1278 return nullid
1327 return nullid
1279
1328
1280 @storecache(narrowspec.FILENAME)
1329 @storecache(narrowspec.FILENAME)
1281 def narrowpats(self):
1330 def narrowpats(self):
1282 """matcher patterns for this repository's narrowspec
1331 """matcher patterns for this repository's narrowspec
1283
1332
1284 A tuple of (includes, excludes).
1333 A tuple of (includes, excludes).
1285 """
1334 """
1286 return narrowspec.load(self)
1335 return narrowspec.load(self)
1287
1336
1288 @storecache(narrowspec.FILENAME)
1337 @storecache(narrowspec.FILENAME)
1289 def _storenarrowmatch(self):
1338 def _storenarrowmatch(self):
1290 if repository.NARROW_REQUIREMENT not in self.requirements:
1339 if repository.NARROW_REQUIREMENT not in self.requirements:
1291 return matchmod.always()
1340 return matchmod.always()
1292 include, exclude = self.narrowpats
1341 include, exclude = self.narrowpats
1293 return narrowspec.match(self.root, include=include, exclude=exclude)
1342 return narrowspec.match(self.root, include=include, exclude=exclude)
1294
1343
1295 @storecache(narrowspec.FILENAME)
1344 @storecache(narrowspec.FILENAME)
1296 def _narrowmatch(self):
1345 def _narrowmatch(self):
1297 if repository.NARROW_REQUIREMENT not in self.requirements:
1346 if repository.NARROW_REQUIREMENT not in self.requirements:
1298 return matchmod.always()
1347 return matchmod.always()
1299 narrowspec.checkworkingcopynarrowspec(self)
1348 narrowspec.checkworkingcopynarrowspec(self)
1300 include, exclude = self.narrowpats
1349 include, exclude = self.narrowpats
1301 return narrowspec.match(self.root, include=include, exclude=exclude)
1350 return narrowspec.match(self.root, include=include, exclude=exclude)
1302
1351
1303 def narrowmatch(self, match=None, includeexact=False):
1352 def narrowmatch(self, match=None, includeexact=False):
1304 """matcher corresponding the the repo's narrowspec
1353 """matcher corresponding the the repo's narrowspec
1305
1354
1306 If `match` is given, then that will be intersected with the narrow
1355 If `match` is given, then that will be intersected with the narrow
1307 matcher.
1356 matcher.
1308
1357
1309 If `includeexact` is True, then any exact matches from `match` will
1358 If `includeexact` is True, then any exact matches from `match` will
1310 be included even if they're outside the narrowspec.
1359 be included even if they're outside the narrowspec.
1311 """
1360 """
1312 if match:
1361 if match:
1313 if includeexact and not self._narrowmatch.always():
1362 if includeexact and not self._narrowmatch.always():
1314 # do not exclude explicitly-specified paths so that they can
1363 # do not exclude explicitly-specified paths so that they can
1315 # be warned later on
1364 # be warned later on
1316 em = matchmod.exact(match.files())
1365 em = matchmod.exact(match.files())
1317 nm = matchmod.unionmatcher([self._narrowmatch, em])
1366 nm = matchmod.unionmatcher([self._narrowmatch, em])
1318 return matchmod.intersectmatchers(match, nm)
1367 return matchmod.intersectmatchers(match, nm)
1319 return matchmod.intersectmatchers(match, self._narrowmatch)
1368 return matchmod.intersectmatchers(match, self._narrowmatch)
1320 return self._narrowmatch
1369 return self._narrowmatch
1321
1370
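# Example (hypothetical paths): the returned matcher can be called on
# repository-relative paths to test whether they fall inside the
# narrowspec:
#   m = repo.narrowmatch()
#   m(b'included/dir/file.txt')   # True when covered by the narrowspec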
1322 def setnarrowpats(self, newincludes, newexcludes):
1371 def setnarrowpats(self, newincludes, newexcludes):
1323 narrowspec.save(self, newincludes, newexcludes)
1372 narrowspec.save(self, newincludes, newexcludes)
1324 self.invalidate(clearfilecache=True)
1373 self.invalidate(clearfilecache=True)
1325
1374
1326 def __getitem__(self, changeid):
1375 def __getitem__(self, changeid):
1327 if changeid is None:
1376 if changeid is None:
1328 return context.workingctx(self)
1377 return context.workingctx(self)
1329 if isinstance(changeid, context.basectx):
1378 if isinstance(changeid, context.basectx):
1330 return changeid
1379 return changeid
1331 if isinstance(changeid, slice):
1380 if isinstance(changeid, slice):
1332 # wdirrev isn't contiguous so the slice shouldn't include it
1381 # wdirrev isn't contiguous so the slice shouldn't include it
1333 return [self[i]
1382 return [self[i]
1334 for i in pycompat.xrange(*changeid.indices(len(self)))
1383 for i in pycompat.xrange(*changeid.indices(len(self)))
1335 if i not in self.changelog.filteredrevs]
1384 if i not in self.changelog.filteredrevs]
1336 try:
1385 try:
1337 if isinstance(changeid, int):
1386 if isinstance(changeid, int):
1338 node = self.changelog.node(changeid)
1387 node = self.changelog.node(changeid)
1339 rev = changeid
1388 rev = changeid
1340 elif changeid == 'null':
1389 elif changeid == 'null':
1341 node = nullid
1390 node = nullid
1342 rev = nullrev
1391 rev = nullrev
1343 elif changeid == 'tip':
1392 elif changeid == 'tip':
1344 node = self.changelog.tip()
1393 node = self.changelog.tip()
1345 rev = self.changelog.rev(node)
1394 rev = self.changelog.rev(node)
1346 elif changeid == '.':
1395 elif changeid == '.':
1347 # this is a hack to delay/avoid loading obsmarkers
1396 # this is a hack to delay/avoid loading obsmarkers
1348 # when we know that '.' won't be hidden
1397 # when we know that '.' won't be hidden
1349 node = self.dirstate.p1()
1398 node = self.dirstate.p1()
1350 rev = self.unfiltered().changelog.rev(node)
1399 rev = self.unfiltered().changelog.rev(node)
1351 elif len(changeid) == 20:
1400 elif len(changeid) == 20:
1352 try:
1401 try:
1353 node = changeid
1402 node = changeid
1354 rev = self.changelog.rev(changeid)
1403 rev = self.changelog.rev(changeid)
1355 except error.FilteredLookupError:
1404 except error.FilteredLookupError:
1356 changeid = hex(changeid) # for the error message
1405 changeid = hex(changeid) # for the error message
1357 raise
1406 raise
1358 except LookupError:
1407 except LookupError:
1359 # check if it might have come from damaged dirstate
1408 # check if it might have come from damaged dirstate
1360 #
1409 #
1361 # XXX we could avoid the unfiltered if we had a recognizable
1410 # XXX we could avoid the unfiltered if we had a recognizable
1362 # exception for filtered changeset access
1411 # exception for filtered changeset access
1363 if (self.local()
1412 if (self.local()
1364 and changeid in self.unfiltered().dirstate.parents()):
1413 and changeid in self.unfiltered().dirstate.parents()):
1365 msg = _("working directory has unknown parent '%s'!")
1414 msg = _("working directory has unknown parent '%s'!")
1366 raise error.Abort(msg % short(changeid))
1415 raise error.Abort(msg % short(changeid))
1367 changeid = hex(changeid) # for the error message
1416 changeid = hex(changeid) # for the error message
1368 raise
1417 raise
1369
1418
1370 elif len(changeid) == 40:
1419 elif len(changeid) == 40:
1371 node = bin(changeid)
1420 node = bin(changeid)
1372 rev = self.changelog.rev(node)
1421 rev = self.changelog.rev(node)
1373 else:
1422 else:
1374 raise error.ProgrammingError(
1423 raise error.ProgrammingError(
1375 "unsupported changeid '%s' of type %s" %
1424 "unsupported changeid '%s' of type %s" %
1376 (changeid, type(changeid)))
1425 (changeid, type(changeid)))
1377
1426
1378 return context.changectx(self, rev, node)
1427 return context.changectx(self, rev, node)
1379
1428
1380 except (error.FilteredIndexError, error.FilteredLookupError):
1429 except (error.FilteredIndexError, error.FilteredLookupError):
1381 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1430 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1382 % pycompat.bytestr(changeid))
1431 % pycompat.bytestr(changeid))
1383 except (IndexError, LookupError):
1432 except (IndexError, LookupError):
1384 raise error.RepoLookupError(
1433 raise error.RepoLookupError(
1385 _("unknown revision '%s'") % pycompat.bytestr(changeid))
1434 _("unknown revision '%s'") % pycompat.bytestr(changeid))
1386 except error.WdirUnsupported:
1435 except error.WdirUnsupported:
1387 return context.workingctx(self)
1436 return context.workingctx(self)
1388
1437
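# Examples of the changeid forms handled above (illustrative lookups):
#   repo[None]     # working directory context
#   repo[b'tip']   # tip changeset
#   repo[b'.']     # working directory parent
#   repo[0]        # revision number 0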
1389 def __contains__(self, changeid):
1438 def __contains__(self, changeid):
1390 """True if the given changeid exists
1439 """True if the given changeid exists
1391
1440
1441 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1442 is specified.
1394 """
1443 """
1395 try:
1444 try:
1396 self[changeid]
1445 self[changeid]
1397 return True
1446 return True
1398 except error.RepoLookupError:
1447 except error.RepoLookupError:
1399 return False
1448 return False
1400
1449
1401 def __nonzero__(self):
1450 def __nonzero__(self):
1402 return True
1451 return True
1403
1452
1404 __bool__ = __nonzero__
1453 __bool__ = __nonzero__
1405
1454
1406 def __len__(self):
1455 def __len__(self):
1407 # no need to pay the cost of repoview.changelog
1456 # no need to pay the cost of repoview.changelog
1408 unfi = self.unfiltered()
1457 unfi = self.unfiltered()
1409 return len(unfi.changelog)
1458 return len(unfi.changelog)
1410
1459
1411 def __iter__(self):
1460 def __iter__(self):
1412 return iter(self.changelog)
1461 return iter(self.changelog)
1413
1462
1414 def revs(self, expr, *args):
1463 def revs(self, expr, *args):
1415 '''Find revisions matching a revset.
1464 '''Find revisions matching a revset.
1416
1465
1417 The revset is specified as a string ``expr`` that may contain
1466 The revset is specified as a string ``expr`` that may contain
1418 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1467 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1419
1468
1420 Revset aliases from the configuration are not expanded. To expand
1469 Revset aliases from the configuration are not expanded. To expand
1421 user aliases, consider calling ``scmutil.revrange()`` or
1470 user aliases, consider calling ``scmutil.revrange()`` or
1422 ``repo.anyrevs([expr], user=True)``.
1471 ``repo.anyrevs([expr], user=True)``.
1423
1472
1424 Returns a revset.abstractsmartset, which is a list-like interface
1473 Returns a revset.abstractsmartset, which is a list-like interface
1425 that contains integer revisions.
1474 that contains integer revisions.
1426 '''
1475 '''
1427 tree = revsetlang.spectree(expr, *args)
1476 tree = revsetlang.spectree(expr, *args)
1428 return revset.makematcher(tree)(self)
1477 return revset.makematcher(tree)(self)
1429
1478
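# Examples (illustrative revsets) of the %-formatting handled by
# revsetlang.formatspec:
#   repo.revs(b'ancestors(%d)', 42)
#   repo.revs(b'branch(%s) and not obsolete()', b'default')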
1430 def set(self, expr, *args):
1479 def set(self, expr, *args):
1431 '''Find revisions matching a revset and emit changectx instances.
1480 '''Find revisions matching a revset and emit changectx instances.
1432
1481
1433 This is a convenience wrapper around ``revs()`` that iterates the
1482 This is a convenience wrapper around ``revs()`` that iterates the
1434 result and is a generator of changectx instances.
1483 result and is a generator of changectx instances.
1435
1484
1436 Revset aliases from the configuration are not expanded. To expand
1485 Revset aliases from the configuration are not expanded. To expand
1437 user aliases, consider calling ``scmutil.revrange()``.
1486 user aliases, consider calling ``scmutil.revrange()``.
1438 '''
1487 '''
1439 for r in self.revs(expr, *args):
1488 for r in self.revs(expr, *args):
1440 yield self[r]
1489 yield self[r]
1441
1490
1442 def anyrevs(self, specs, user=False, localalias=None):
1491 def anyrevs(self, specs, user=False, localalias=None):
1443 '''Find revisions matching one of the given revsets.
1492 '''Find revisions matching one of the given revsets.
1444
1493
1445 Revset aliases from the configuration are not expanded by default. To
1494 Revset aliases from the configuration are not expanded by default. To
1446 expand user aliases, specify ``user=True``. To provide some local
1495 expand user aliases, specify ``user=True``. To provide some local
1447 definitions overriding user aliases, set ``localalias`` to
1496 definitions overriding user aliases, set ``localalias`` to
1448 ``{name: definitionstring}``.
1497 ``{name: definitionstring}``.
1449 '''
1498 '''
1450 if user:
1499 if user:
1451 m = revset.matchany(self.ui, specs,
1500 m = revset.matchany(self.ui, specs,
1452 lookup=revset.lookupfn(self),
1501 lookup=revset.lookupfn(self),
1453 localalias=localalias)
1502 localalias=localalias)
1454 else:
1503 else:
1455 m = revset.matchany(None, specs, localalias=localalias)
1504 m = revset.matchany(None, specs, localalias=localalias)
1456 return m(self)
1505 return m(self)
1457
1506
1458 def url(self):
1507 def url(self):
1459 return 'file:' + self.root
1508 return 'file:' + self.root
1460
1509
1461 def hook(self, name, throw=False, **args):
1510 def hook(self, name, throw=False, **args):
1462 """Call a hook, passing this repo instance.
1511 """Call a hook, passing this repo instance.
1463
1512
1513 This is a convenience method to aid invoking hooks. Extensions likely
1514 won't call this unless they have registered a custom hook or are
1515 replacing code that is expected to call a hook.
1467 """
1516 """
1468 return hook.hook(self.ui, self, name, throw, **args)
1517 return hook.hook(self.ui, self, name, throw, **args)
1469
1518
1470 @filteredpropertycache
1519 @filteredpropertycache
1471 def _tagscache(self):
1520 def _tagscache(self):
1472 '''Returns a tagscache object that contains various tags related
1521 '''Returns a tagscache object that contains various tags related
1473 caches.'''
1522 caches.'''
1474
1523
1475 # This simplifies its cache management by having one decorated
1524 # This simplifies its cache management by having one decorated
1476 # function (this one) and the rest simply fetch things from it.
1525 # function (this one) and the rest simply fetch things from it.
1477 class tagscache(object):
1526 class tagscache(object):
1478 def __init__(self):
1527 def __init__(self):
1479 # These two define the set of tags for this repository. tags
1528 # These two define the set of tags for this repository. tags
1480 # maps tag name to node; tagtypes maps tag name to 'global' or
1529 # maps tag name to node; tagtypes maps tag name to 'global' or
1481 # 'local'. (Global tags are defined by .hgtags across all
1530 # 'local'. (Global tags are defined by .hgtags across all
1482 # heads, and local tags are defined in .hg/localtags.)
1531 # heads, and local tags are defined in .hg/localtags.)
1483 # They constitute the in-memory cache of tags.
1532 # They constitute the in-memory cache of tags.
1484 self.tags = self.tagtypes = None
1533 self.tags = self.tagtypes = None
1485
1534
1486 self.nodetagscache = self.tagslist = None
1535 self.nodetagscache = self.tagslist = None
1487
1536
1488 cache = tagscache()
1537 cache = tagscache()
1489 cache.tags, cache.tagtypes = self._findtags()
1538 cache.tags, cache.tagtypes = self._findtags()
1490
1539
1491 return cache
1540 return cache
1492
1541
1493 def tags(self):
1542 def tags(self):
1494 '''return a mapping of tag to node'''
1543 '''return a mapping of tag to node'''
1495 t = {}
1544 t = {}
1496 if self.changelog.filteredrevs:
1545 if self.changelog.filteredrevs:
1497 tags, tt = self._findtags()
1546 tags, tt = self._findtags()
1498 else:
1547 else:
1499 tags = self._tagscache.tags
1548 tags = self._tagscache.tags
1500 rev = self.changelog.rev
1549 rev = self.changelog.rev
1501 for k, v in tags.iteritems():
1550 for k, v in tags.iteritems():
1502 try:
1551 try:
1503 # ignore tags to unknown nodes
1552 # ignore tags to unknown nodes
1504 rev(v)
1553 rev(v)
1505 t[k] = v
1554 t[k] = v
1506 except (error.LookupError, ValueError):
1555 except (error.LookupError, ValueError):
1507 pass
1556 pass
1508 return t
1557 return t
1509
1558
1510 def _findtags(self):
1559 def _findtags(self):
1511 '''Do the hard work of finding tags. Return a pair of dicts
1560 '''Do the hard work of finding tags. Return a pair of dicts
1512 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1561 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1513 maps tag name to a string like \'global\' or \'local\'.
1562 maps tag name to a string like \'global\' or \'local\'.
1514 Subclasses or extensions are free to add their own tags, but
1563 Subclasses or extensions are free to add their own tags, but
1515 should be aware that the returned dicts will be retained for the
1564 should be aware that the returned dicts will be retained for the
1516 duration of the localrepo object.'''
1565 duration of the localrepo object.'''
1517
1566
1518 # XXX what tagtype should subclasses/extensions use? Currently
1567 # XXX what tagtype should subclasses/extensions use? Currently
1519 # mq and bookmarks add tags, but do not set the tagtype at all.
1568 # mq and bookmarks add tags, but do not set the tagtype at all.
1520 # Should each extension invent its own tag type? Should there
1569 # Should each extension invent its own tag type? Should there
1521 # be one tagtype for all such "virtual" tags? Or is the status
1570 # be one tagtype for all such "virtual" tags? Or is the status
1522 # quo fine?
1571 # quo fine?
1523
1572
1524
1573
1525 # map tag name to (node, hist)
1574 # map tag name to (node, hist)
1526 alltags = tagsmod.findglobaltags(self.ui, self)
1575 alltags = tagsmod.findglobaltags(self.ui, self)
1527 # map tag name to tag type
1576 # map tag name to tag type
1528 tagtypes = dict((tag, 'global') for tag in alltags)
1577 tagtypes = dict((tag, 'global') for tag in alltags)
1529
1578
1530 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1579 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1531
1580
1532 # Build the return dicts. Have to re-encode tag names because
1581 # Build the return dicts. Have to re-encode tag names because
1533 # the tags module always uses UTF-8 (in order not to lose info
1582 # the tags module always uses UTF-8 (in order not to lose info
1534 # writing to the cache), but the rest of Mercurial wants them in
1583 # writing to the cache), but the rest of Mercurial wants them in
1535 # local encoding.
1584 # local encoding.
1536 tags = {}
1585 tags = {}
1537 for (name, (node, hist)) in alltags.iteritems():
1586 for (name, (node, hist)) in alltags.iteritems():
1538 if node != nullid:
1587 if node != nullid:
1539 tags[encoding.tolocal(name)] = node
1588 tags[encoding.tolocal(name)] = node
1540 tags['tip'] = self.changelog.tip()
1589 tags['tip'] = self.changelog.tip()
1541 tagtypes = dict([(encoding.tolocal(name), value)
1590 tagtypes = dict([(encoding.tolocal(name), value)
1542 for (name, value) in tagtypes.iteritems()])
1591 for (name, value) in tagtypes.iteritems()])
1543 return (tags, tagtypes)
1592 return (tags, tagtypes)
1544
1593
1545 def tagtype(self, tagname):
1594 def tagtype(self, tagname):
1546 '''
1595 '''
1547 return the type of the given tag. result can be:
1596 return the type of the given tag. result can be:
1548
1597
1549 'local' : a local tag
1598 'local' : a local tag
1550 'global' : a global tag
1599 'global' : a global tag
1551 None : tag does not exist
1600 None : tag does not exist
1552 '''
1601 '''
1553
1602
1554 return self._tagscache.tagtypes.get(tagname)
1603 return self._tagscache.tagtypes.get(tagname)
1555
1604
1556 def tagslist(self):
1605 def tagslist(self):
1557 '''return a list of tags ordered by revision'''
1606 '''return a list of tags ordered by revision'''
1558 if not self._tagscache.tagslist:
1607 if not self._tagscache.tagslist:
1559 l = []
1608 l = []
1560 for t, n in self.tags().iteritems():
1609 for t, n in self.tags().iteritems():
1561 l.append((self.changelog.rev(n), t, n))
1610 l.append((self.changelog.rev(n), t, n))
1562 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1611 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1563
1612
1564 return self._tagscache.tagslist
1613 return self._tagscache.tagslist
1565
1614
1566 def nodetags(self, node):
1615 def nodetags(self, node):
1567 '''return the tags associated with a node'''
1616 '''return the tags associated with a node'''
1568 if not self._tagscache.nodetagscache:
1617 if not self._tagscache.nodetagscache:
1569 nodetagscache = {}
1618 nodetagscache = {}
1570 for t, n in self._tagscache.tags.iteritems():
1619 for t, n in self._tagscache.tags.iteritems():
1571 nodetagscache.setdefault(n, []).append(t)
1620 nodetagscache.setdefault(n, []).append(t)
1572 for tags in nodetagscache.itervalues():
1621 for tags in nodetagscache.itervalues():
1573 tags.sort()
1622 tags.sort()
1574 self._tagscache.nodetagscache = nodetagscache
1623 self._tagscache.nodetagscache = nodetagscache
1575 return self._tagscache.nodetagscache.get(node, [])
1624 return self._tagscache.nodetagscache.get(node, [])
1576
1625
1577 def nodebookmarks(self, node):
1626 def nodebookmarks(self, node):
1578 """return the list of bookmarks pointing to the specified node"""
1627 """return the list of bookmarks pointing to the specified node"""
1579 return self._bookmarks.names(node)
1628 return self._bookmarks.names(node)
1580
1629
1581 def branchmap(self):
1630 def branchmap(self):
1582 '''returns a dictionary {branch: [branchheads]} with branchheads
1631 '''returns a dictionary {branch: [branchheads]} with branchheads
1583 ordered by increasing revision number'''
1632 ordered by increasing revision number'''
1584 return self._branchcaches[self]
1633 return self._branchcaches[self]
1585
1634
1586 @unfilteredmethod
1635 @unfilteredmethod
1587 def revbranchcache(self):
1636 def revbranchcache(self):
1588 if not self._revbranchcache:
1637 if not self._revbranchcache:
1589 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1638 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1590 return self._revbranchcache
1639 return self._revbranchcache
1591
1640
1592 def branchtip(self, branch, ignoremissing=False):
1641 def branchtip(self, branch, ignoremissing=False):
1593 '''return the tip node for a given branch
1642 '''return the tip node for a given branch
1594
1643
1595 If ignoremissing is True, then this method will not raise an error.
1644 If ignoremissing is True, then this method will not raise an error.
1596 This is helpful for callers that only expect None for a missing branch
1645 This is helpful for callers that only expect None for a missing branch
1597 (e.g. namespace).
1646 (e.g. namespace).
1598
1647
1599 '''
1648 '''
1600 try:
1649 try:
1601 return self.branchmap().branchtip(branch)
1650 return self.branchmap().branchtip(branch)
1602 except KeyError:
1651 except KeyError:
1603 if not ignoremissing:
1652 if not ignoremissing:
1604 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1653 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1605 else:
1654 else:
1606 pass
1655 pass
1607
1656
1608 def lookup(self, key):
1657 def lookup(self, key):
1609 node = scmutil.revsymbol(self, key).node()
1658 node = scmutil.revsymbol(self, key).node()
1610 if node is None:
1659 if node is None:
1611 raise error.RepoLookupError(_("unknown revision '%s'") % key)
1660 raise error.RepoLookupError(_("unknown revision '%s'") % key)
1612 return node
1661 return node
1613
1662
1614 def lookupbranch(self, key):
1663 def lookupbranch(self, key):
1615 if self.branchmap().hasbranch(key):
1664 if self.branchmap().hasbranch(key):
1616 return key
1665 return key
1617
1666
1618 return scmutil.revsymbol(self, key).branch()
1667 return scmutil.revsymbol(self, key).branch()
1619
1668
1620 def known(self, nodes):
1669 def known(self, nodes):
1621 cl = self.changelog
1670 cl = self.changelog
1622 nm = cl.nodemap
1671 nm = cl.nodemap
1623 filtered = cl.filteredrevs
1672 filtered = cl.filteredrevs
1624 result = []
1673 result = []
1625 for n in nodes:
1674 for n in nodes:
1626 r = nm.get(n)
1675 r = nm.get(n)
1627 resp = not (r is None or r in filtered)
1676 resp = not (r is None or r in filtered)
1628 result.append(resp)
1677 result.append(resp)
1629 return result
1678 return result
1630
1679
1631 def local(self):
1680 def local(self):
1632 return self
1681 return self
1633
1682
1634 def publishing(self):
1683 def publishing(self):
1635 # it's safe (and desirable) to trust the publish flag unconditionally
1684 # it's safe (and desirable) to trust the publish flag unconditionally
1636 # so that we don't finalize changes shared between users via ssh or nfs
1685 # so that we don't finalize changes shared between users via ssh or nfs
1637 return self.ui.configbool('phases', 'publish', untrusted=True)
1686 return self.ui.configbool('phases', 'publish', untrusted=True)
1638
1687
1639 def cancopy(self):
1688 def cancopy(self):
1640 # so statichttprepo's override of local() works
1689 # so statichttprepo's override of local() works
1641 if not self.local():
1690 if not self.local():
1642 return False
1691 return False
1643 if not self.publishing():
1692 if not self.publishing():
1644 return True
1693 return True
1645 # if publishing we can't copy if there is filtered content
1694 # if publishing we can't copy if there is filtered content
1646 return not self.filtered('visible').changelog.filteredrevs
1695 return not self.filtered('visible').changelog.filteredrevs
1647
1696
1648 def shared(self):
1697 def shared(self):
1649 '''the type of shared repository (None if not shared)'''
1698 '''the type of shared repository (None if not shared)'''
1650 if self.sharedpath != self.path:
1699 if self.sharedpath != self.path:
1651 return 'store'
1700 return 'store'
1652 return None
1701 return None
1653
1702
1654 def wjoin(self, f, *insidef):
1703 def wjoin(self, f, *insidef):
1655 return self.vfs.reljoin(self.root, f, *insidef)
1704 return self.vfs.reljoin(self.root, f, *insidef)
1656
1705
1657 def setparents(self, p1, p2=nullid):
1706 def setparents(self, p1, p2=nullid):
1658 with self.dirstate.parentchange():
1707 with self.dirstate.parentchange():
1659 copies = self.dirstate.setparents(p1, p2)
1708 copies = self.dirstate.setparents(p1, p2)
1660 pctx = self[p1]
1709 pctx = self[p1]
1661 if copies:
1710 if copies:
1662 # Adjust copy records; the dirstate cannot do it, as that
1711 # Adjust copy records; the dirstate cannot do it, as that
1663 # requires access to the parents' manifests. Preserve them
1712 # requires access to the parents' manifests. Preserve them
1664 # only for entries added to the first parent.
1713 # only for entries added to the first parent.
1665 for f in copies:
1714 for f in copies:
1666 if f not in pctx and copies[f] in pctx:
1715 if f not in pctx and copies[f] in pctx:
1667 self.dirstate.copy(copies[f], f)
1716 self.dirstate.copy(copies[f], f)
1668 if p2 == nullid:
1717 if p2 == nullid:
1669 for f, s in sorted(self.dirstate.copies().items()):
1718 for f, s in sorted(self.dirstate.copies().items()):
1670 if f not in pctx and s not in pctx:
1719 if f not in pctx and s not in pctx:
1671 self.dirstate.copy(None, f)
1720 self.dirstate.copy(None, f)
1672
1721
1673 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1722 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1674 """changeid must be a changeset revision, if specified.
1723 """changeid must be a changeset revision, if specified.
1675 fileid can be a file revision or node."""
1724 fileid can be a file revision or node."""
1676 return context.filectx(self, path, changeid, fileid,
1725 return context.filectx(self, path, changeid, fileid,
1677 changectx=changectx)
1726 changectx=changectx)
1678
1727
1679 def getcwd(self):
1728 def getcwd(self):
1680 return self.dirstate.getcwd()
1729 return self.dirstate.getcwd()
1681
1730
1682 def pathto(self, f, cwd=None):
1731 def pathto(self, f, cwd=None):
1683 return self.dirstate.pathto(f, cwd)
1732 return self.dirstate.pathto(f, cwd)
1684
1733
1685 def _loadfilter(self, filter):
1734 def _loadfilter(self, filter):
1686 if filter not in self._filterpats:
1735 if filter not in self._filterpats:
1687 l = []
1736 l = []
1688 for pat, cmd in self.ui.configitems(filter):
1737 for pat, cmd in self.ui.configitems(filter):
1689 if cmd == '!':
1738 if cmd == '!':
1690 continue
1739 continue
1691 mf = matchmod.match(self.root, '', [pat])
1740 mf = matchmod.match(self.root, '', [pat])
1692 fn = None
1741 fn = None
1693 params = cmd
1742 params = cmd
1694 for name, filterfn in self._datafilters.iteritems():
1743 for name, filterfn in self._datafilters.iteritems():
1695 if cmd.startswith(name):
1744 if cmd.startswith(name):
1696 fn = filterfn
1745 fn = filterfn
1697 params = cmd[len(name):].lstrip()
1746 params = cmd[len(name):].lstrip()
1698 break
1747 break
1699 if not fn:
1748 if not fn:
1700 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1749 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1701 # Wrap old filters not supporting keyword arguments
1750 # Wrap old filters not supporting keyword arguments
1702 if not pycompat.getargspec(fn)[2]:
1751 if not pycompat.getargspec(fn)[2]:
1703 oldfn = fn
1752 oldfn = fn
1704 fn = lambda s, c, **kwargs: oldfn(s, c)
1753 fn = lambda s, c, **kwargs: oldfn(s, c)
1705 l.append((mf, fn, params))
1754 l.append((mf, fn, params))
1706 self._filterpats[filter] = l
1755 self._filterpats[filter] = l
1707 return self._filterpats[filter]
1756 return self._filterpats[filter]
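# An illustrative hgrc snippet (example patterns assumed, not taken from
# this repository) of the kind of configuration consumed by
# _loadfilter('encode') / _loadfilter('decode'): '!' disables a pattern,
# a command starting with a registered datafilter name uses that filter,
# and anything else is piped through procutil.filter::
#
#     [encode]
#     **.txt = dos2unix
#
#     [decode]
#     **.txt = unix2dos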
1708
1757
1709 def _filter(self, filterpats, filename, data):
1758 def _filter(self, filterpats, filename, data):
1710 for mf, fn, cmd in filterpats:
1759 for mf, fn, cmd in filterpats:
1711 if mf(filename):
1760 if mf(filename):
1712 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1761 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1713 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1762 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1714 break
1763 break
1715
1764
1716 return data
1765 return data
1717
1766
1718 @unfilteredpropertycache
1767 @unfilteredpropertycache
1719 def _encodefilterpats(self):
1768 def _encodefilterpats(self):
1720 return self._loadfilter('encode')
1769 return self._loadfilter('encode')
1721
1770
1722 @unfilteredpropertycache
1771 @unfilteredpropertycache
1723 def _decodefilterpats(self):
1772 def _decodefilterpats(self):
1724 return self._loadfilter('decode')
1773 return self._loadfilter('decode')
1725
1774
1726 def adddatafilter(self, name, filter):
1775 def adddatafilter(self, name, filter):
1727 self._datafilters[name] = filter
1776 self._datafilters[name] = filter
1728
1777
1729 def wread(self, filename):
1778 def wread(self, filename):
1730 if self.wvfs.islink(filename):
1779 if self.wvfs.islink(filename):
1731 data = self.wvfs.readlink(filename)
1780 data = self.wvfs.readlink(filename)
1732 else:
1781 else:
1733 data = self.wvfs.read(filename)
1782 data = self.wvfs.read(filename)
1734 return self._filter(self._encodefilterpats, filename, data)
1783 return self._filter(self._encodefilterpats, filename, data)
1735
1784
1736 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1785 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1737 """write ``data`` into ``filename`` in the working directory
1786 """write ``data`` into ``filename`` in the working directory
1738
1787
1739 This returns the length of the written (maybe decoded) data.
1788 This returns the length of the written (maybe decoded) data.
1740 """
1789 """
1741 data = self._filter(self._decodefilterpats, filename, data)
1790 data = self._filter(self._decodefilterpats, filename, data)
1742 if 'l' in flags:
1791 if 'l' in flags:
1743 self.wvfs.symlink(data, filename)
1792 self.wvfs.symlink(data, filename)
1744 else:
1793 else:
1745 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1794 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1746 **kwargs)
1795 **kwargs)
1747 if 'x' in flags:
1796 if 'x' in flags:
1748 self.wvfs.setflags(filename, False, True)
1797 self.wvfs.setflags(filename, False, True)
1749 else:
1798 else:
1750 self.wvfs.setflags(filename, False, False)
1799 self.wvfs.setflags(filename, False, False)
1751 return len(data)
1800 return len(data)
1752
1801
1753 def wwritedata(self, filename, data):
1802 def wwritedata(self, filename, data):
1754 return self._filter(self._decodefilterpats, filename, data)
1803 return self._filter(self._decodefilterpats, filename, data)
1755
1804
1756 def currenttransaction(self):
1805 def currenttransaction(self):
1757 """return the current transaction or None if none exists"""
1806 """return the current transaction or None if none exists"""
1758 if self._transref:
1807 if self._transref:
1759 tr = self._transref()
1808 tr = self._transref()
1760 else:
1809 else:
1761 tr = None
1810 tr = None
1762
1811
1763 if tr and tr.running():
1812 if tr and tr.running():
1764 return tr
1813 return tr
1765 return None
1814 return None
1766
1815
1767 def transaction(self, desc, report=None):
1816 def transaction(self, desc, report=None):
1768 if (self.ui.configbool('devel', 'all-warnings')
1817 if (self.ui.configbool('devel', 'all-warnings')
1769 or self.ui.configbool('devel', 'check-locks')):
1818 or self.ui.configbool('devel', 'check-locks')):
1770 if self._currentlock(self._lockref) is None:
1819 if self._currentlock(self._lockref) is None:
1771 raise error.ProgrammingError('transaction requires locking')
1820 raise error.ProgrammingError('transaction requires locking')
1772 tr = self.currenttransaction()
1821 tr = self.currenttransaction()
1773 if tr is not None:
1822 if tr is not None:
1774 return tr.nest(name=desc)
1823 return tr.nest(name=desc)
1775
1824
1776 # abort here if the journal already exists
1825 # abort here if the journal already exists
1777 if self.svfs.exists("journal"):
1826 if self.svfs.exists("journal"):
1778 raise error.RepoError(
1827 raise error.RepoError(
1779 _("abandoned transaction found"),
1828 _("abandoned transaction found"),
1780 hint=_("run 'hg recover' to clean up transaction"))
1829 hint=_("run 'hg recover' to clean up transaction"))
1781
1830
1782 idbase = "%.40f#%f" % (random.random(), time.time())
1831 idbase = "%.40f#%f" % (random.random(), time.time())
1783 ha = hex(hashlib.sha1(idbase).digest())
1832 ha = hex(hashlib.sha1(idbase).digest())
1784 txnid = 'TXN:' + ha
1833 txnid = 'TXN:' + ha
1785 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1834 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1786
1835
1787 self._writejournal(desc)
1836 self._writejournal(desc)
1788 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1837 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1789 if report:
1838 if report:
1790 rp = report
1839 rp = report
1791 else:
1840 else:
1792 rp = self.ui.warn
1841 rp = self.ui.warn
1793 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1842 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1794 # we must avoid cyclic reference between repo and transaction.
1843 # we must avoid cyclic reference between repo and transaction.
1795 reporef = weakref.ref(self)
1844 reporef = weakref.ref(self)
1796 # Code to track tag movement
1845 # Code to track tag movement
1797 #
1846 #
1798 # Since tags are all handled as file content, it is actually quite hard
1847 # Since tags are all handled as file content, it is actually quite hard
1799 # to track these movements from a code perspective. So we fall back to
1848 # to track these movements from a code perspective. So we fall back to
1800 # tracking at the repository level. One could envision tracking changes
1849 # tracking at the repository level. One could envision tracking changes
1801 # to the '.hgtags' file through changegroup application, but that fails
1850 # to the '.hgtags' file through changegroup application, but that fails
1802 # to cope with cases where a transaction exposes new heads without a
1851 # to cope with cases where a transaction exposes new heads without a
1803 # changegroup being involved (eg: phase movement).
1852 # changegroup being involved (eg: phase movement).
1804 #
1853 #
1805 # For now, we gate the feature behind a flag since it likely comes with
1854 # For now, we gate the feature behind a flag since it likely comes with
1806 # a performance impact. The current code runs more often than needed
1855 # a performance impact. The current code runs more often than needed
1807 # and does not use caches as much as it could. The current focus is on
1856 # and does not use caches as much as it could. The current focus is on
1808 # the behavior of the feature, so we disable it by default. The flag
1857 # the behavior of the feature, so we disable it by default. The flag
1809 # will be removed when we are happy with the performance impact.
1858 # will be removed when we are happy with the performance impact.
1810 #
1859 #
1811 # Once this feature is no longer experimental move the following
1860 # Once this feature is no longer experimental move the following
1812 # documentation to the appropriate help section:
1861 # documentation to the appropriate help section:
1813 #
1862 #
1814 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1863 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1815 # tags (new or changed or deleted tags). In addition the details of
1864 # tags (new or changed or deleted tags). In addition the details of
1816 # these changes are made available in a file at:
1865 # these changes are made available in a file at:
1817 # ``REPOROOT/.hg/changes/tags.changes``.
1866 # ``REPOROOT/.hg/changes/tags.changes``.
1818 # Make sure you check for HG_TAG_MOVED before reading that file as it
1867 # Make sure you check for HG_TAG_MOVED before reading that file as it
1819 # might exist from a previous transaction even if no tags were touched
1868 # might exist from a previous transaction even if no tags were touched
1820 # in this one. Changes are recorded in a line-based format::
1869 # in this one. Changes are recorded in a line-based format::
1821 #
1870 #
1822 # <action> <hex-node> <tag-name>\n
1871 # <action> <hex-node> <tag-name>\n
1823 #
1872 #
1824 # Actions are defined as follows:
1873 # Actions are defined as follows:
1825 # "-R": tag is removed,
1874 # "-R": tag is removed,
1826 # "+A": tag is added,
1875 # "+A": tag is added,
1827 # "-M": tag is moved (old value),
1876 # "-M": tag is moved (old value),
1828 # "+M": tag is moved (new value),
1877 # "+M": tag is moved (new value),
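#
# A minimal illustrative sketch (hypothetical hook, not part of this
# module): an in-process 'txnclose' hook could consume that file along
# these lines::
#
#     def tagmovehook(ui, repo, **kwargs):
#         if kwargs.get('tag_moved') != '1':
#             return
#         for line in repo.vfs('changes/tags.changes'):
#             action, node, name = line.rstrip('\n').split(' ', 2)
#             ui.status('tag %s: %s -> %s\n' % (action, name, node[:12]))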
1829 tracktags = lambda x: None
1878 tracktags = lambda x: None
1830 # experimental config: experimental.hook-track-tags
1879 # experimental config: experimental.hook-track-tags
1831 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1880 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1832 if desc != 'strip' and shouldtracktags:
1881 if desc != 'strip' and shouldtracktags:
1833 oldheads = self.changelog.headrevs()
1882 oldheads = self.changelog.headrevs()
1834 def tracktags(tr2):
1883 def tracktags(tr2):
1835 repo = reporef()
1884 repo = reporef()
1836 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1885 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1837 newheads = repo.changelog.headrevs()
1886 newheads = repo.changelog.headrevs()
1838 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1887 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1839 # note: we compare lists here.
1888 # note: we compare lists here.
1840 # As we do it only once, building a set would not be cheaper.
1889 # As we do it only once, building a set would not be cheaper.
1841 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1890 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1842 if changes:
1891 if changes:
1843 tr2.hookargs['tag_moved'] = '1'
1892 tr2.hookargs['tag_moved'] = '1'
1844 with repo.vfs('changes/tags.changes', 'w',
1893 with repo.vfs('changes/tags.changes', 'w',
1845 atomictemp=True) as changesfile:
1894 atomictemp=True) as changesfile:
1846 # note: we do not register the file to the transaction
1895 # note: we do not register the file to the transaction
1847 # because we need it to still exist once the transaction
1896 # because we need it to still exist once the transaction
1848 # is closed (for txnclose hooks)
1897 # is closed (for txnclose hooks)
1849 tagsmod.writediff(changesfile, changes)
1898 tagsmod.writediff(changesfile, changes)
1850 def validate(tr2):
1899 def validate(tr2):
1851 """will run pre-closing hooks"""
1900 """will run pre-closing hooks"""
1852 # XXX the transaction API is a bit lacking here so we take a hacky
1901 # XXX the transaction API is a bit lacking here so we take a hacky
1853 # path for now
1902 # path for now
1854 #
1903 #
1855 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1904 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1856 # dict is copied before these run. In addition we need the data
1905 # dict is copied before these run. In addition we need the data
1857 # available to in-memory hooks too.
1906 # available to in-memory hooks too.
1858 #
1907 #
1859 # Moreover, we also need to make sure this runs before txnclose
1908 # Moreover, we also need to make sure this runs before txnclose
1860 # hooks and there is no "pending" mechanism that would execute
1909 # hooks and there is no "pending" mechanism that would execute
1861 # logic only if hooks are about to run.
1910 # logic only if hooks are about to run.
1862 #
1911 #
1863 # Fixing this limitation of the transaction is also needed to track
1912 # Fixing this limitation of the transaction is also needed to track
1864 # other families of changes (bookmarks, phases, obsolescence).
1913 # other families of changes (bookmarks, phases, obsolescence).
1865 #
1914 #
1866 # This will have to be fixed before we remove the experimental
1915 # This will have to be fixed before we remove the experimental
1867 # gating.
1916 # gating.
1868 tracktags(tr2)
1917 tracktags(tr2)
1869 repo = reporef()
1918 repo = reporef()
1870 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1919 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1871 scmutil.enforcesinglehead(repo, tr2, desc)
1920 scmutil.enforcesinglehead(repo, tr2, desc)
1872 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1921 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1873 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1922 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1874 args = tr.hookargs.copy()
1923 args = tr.hookargs.copy()
1875 args.update(bookmarks.preparehookargs(name, old, new))
1924 args.update(bookmarks.preparehookargs(name, old, new))
1876 repo.hook('pretxnclose-bookmark', throw=True,
1925 repo.hook('pretxnclose-bookmark', throw=True,
1877 **pycompat.strkwargs(args))
1926 **pycompat.strkwargs(args))
1878 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1927 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1879 cl = repo.unfiltered().changelog
1928 cl = repo.unfiltered().changelog
1880 for rev, (old, new) in tr.changes['phases'].items():
1929 for rev, (old, new) in tr.changes['phases'].items():
1881 args = tr.hookargs.copy()
1930 args = tr.hookargs.copy()
1882 node = hex(cl.node(rev))
1931 node = hex(cl.node(rev))
1883 args.update(phases.preparehookargs(node, old, new))
1932 args.update(phases.preparehookargs(node, old, new))
1884 repo.hook('pretxnclose-phase', throw=True,
1933 repo.hook('pretxnclose-phase', throw=True,
1885 **pycompat.strkwargs(args))
1934 **pycompat.strkwargs(args))
1886
1935
1887 repo.hook('pretxnclose', throw=True,
1936 repo.hook('pretxnclose', throw=True,
1888 **pycompat.strkwargs(tr.hookargs))
1937 **pycompat.strkwargs(tr.hookargs))
1889 def releasefn(tr, success):
1938 def releasefn(tr, success):
1890 repo = reporef()
1939 repo = reporef()
1891 if success:
1940 if success:
1892 # this should be explicitly invoked here, because
1941 # this should be explicitly invoked here, because
1893 # in-memory changes aren't written out when closing the
1942 # in-memory changes aren't written out when closing the
1894 # transaction, if tr.addfilegenerator (via
1943 # transaction, if tr.addfilegenerator (via
1895 # dirstate.write or so) isn't invoked while the
1944 # dirstate.write or so) isn't invoked while the
1896 # transaction is running
1945 # transaction is running
1897 repo.dirstate.write(None)
1946 repo.dirstate.write(None)
1898 else:
1947 else:
1899 # discard all changes (including ones already written
1948 # discard all changes (including ones already written
1900 # out) in this transaction
1949 # out) in this transaction
1901 narrowspec.restorebackup(self, 'journal.narrowspec')
1950 narrowspec.restorebackup(self, 'journal.narrowspec')
1902 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
1951 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
1903 repo.dirstate.restorebackup(None, 'journal.dirstate')
1952 repo.dirstate.restorebackup(None, 'journal.dirstate')
1904
1953
1905 repo.invalidate(clearfilecache=True)
1954 repo.invalidate(clearfilecache=True)
1906
1955
1907 tr = transaction.transaction(rp, self.svfs, vfsmap,
1956 tr = transaction.transaction(rp, self.svfs, vfsmap,
1908 "journal",
1957 "journal",
1909 "undo",
1958 "undo",
1910 aftertrans(renames),
1959 aftertrans(renames),
1911 self.store.createmode,
1960 self.store.createmode,
1912 validator=validate,
1961 validator=validate,
1913 releasefn=releasefn,
1962 releasefn=releasefn,
1914 checkambigfiles=_cachedfiles,
1963 checkambigfiles=_cachedfiles,
1915 name=desc)
1964 name=desc)
1916 tr.changes['origrepolen'] = len(self)
1965 tr.changes['origrepolen'] = len(self)
1917 tr.changes['obsmarkers'] = set()
1966 tr.changes['obsmarkers'] = set()
1918 tr.changes['phases'] = {}
1967 tr.changes['phases'] = {}
1919 tr.changes['bookmarks'] = {}
1968 tr.changes['bookmarks'] = {}
1920
1969
1921 tr.hookargs['txnid'] = txnid
1970 tr.hookargs['txnid'] = txnid
1922 tr.hookargs['txnname'] = desc
1971 tr.hookargs['txnname'] = desc
1923 # note: writing the fncache only during finalize means that the file is
1972 # note: writing the fncache only during finalize means that the file is
1924 # outdated when running hooks. As fncache is used for streaming clone,
1973 # outdated when running hooks. As fncache is used for streaming clone,
1925 # this is not expected to break anything that happens during the hooks.
1974 # this is not expected to break anything that happens during the hooks.
1926 tr.addfinalize('flush-fncache', self.store.write)
1975 tr.addfinalize('flush-fncache', self.store.write)
1927 def txnclosehook(tr2):
1976 def txnclosehook(tr2):
1928 """To be run if transaction is successful, will schedule a hook run
1977 """To be run if transaction is successful, will schedule a hook run
1929 """
1978 """
1930 # Don't reference tr2 in hook() so we don't hold a reference.
1979 # Don't reference tr2 in hook() so we don't hold a reference.
1931 # This reduces memory consumption when there are multiple
1980 # This reduces memory consumption when there are multiple
1932 # transactions per lock. This can likely go away if issue5045
1981 # transactions per lock. This can likely go away if issue5045
1933 # fixes the function accumulation.
1982 # fixes the function accumulation.
1934 hookargs = tr2.hookargs
1983 hookargs = tr2.hookargs
1935
1984
1936 def hookfunc():
1985 def hookfunc():
1937 repo = reporef()
1986 repo = reporef()
1938 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1987 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1939 bmchanges = sorted(tr.changes['bookmarks'].items())
1988 bmchanges = sorted(tr.changes['bookmarks'].items())
1940 for name, (old, new) in bmchanges:
1989 for name, (old, new) in bmchanges:
1941 args = tr.hookargs.copy()
1990 args = tr.hookargs.copy()
1942 args.update(bookmarks.preparehookargs(name, old, new))
1991 args.update(bookmarks.preparehookargs(name, old, new))
1943 repo.hook('txnclose-bookmark', throw=False,
1992 repo.hook('txnclose-bookmark', throw=False,
1944 **pycompat.strkwargs(args))
1993 **pycompat.strkwargs(args))
1945
1994
1946 if hook.hashook(repo.ui, 'txnclose-phase'):
1995 if hook.hashook(repo.ui, 'txnclose-phase'):
1947 cl = repo.unfiltered().changelog
1996 cl = repo.unfiltered().changelog
1948 phasemv = sorted(tr.changes['phases'].items())
1997 phasemv = sorted(tr.changes['phases'].items())
1949 for rev, (old, new) in phasemv:
1998 for rev, (old, new) in phasemv:
1950 args = tr.hookargs.copy()
1999 args = tr.hookargs.copy()
1951 node = hex(cl.node(rev))
2000 node = hex(cl.node(rev))
1952 args.update(phases.preparehookargs(node, old, new))
2001 args.update(phases.preparehookargs(node, old, new))
1953 repo.hook('txnclose-phase', throw=False,
2002 repo.hook('txnclose-phase', throw=False,
1954 **pycompat.strkwargs(args))
2003 **pycompat.strkwargs(args))
1955
2004
1956 repo.hook('txnclose', throw=False,
2005 repo.hook('txnclose', throw=False,
1957 **pycompat.strkwargs(hookargs))
2006 **pycompat.strkwargs(hookargs))
1958 reporef()._afterlock(hookfunc)
2007 reporef()._afterlock(hookfunc)
1959 tr.addfinalize('txnclose-hook', txnclosehook)
2008 tr.addfinalize('txnclose-hook', txnclosehook)
1960 # Include a leading "-" to make it happen before the transaction summary
2009 # Include a leading "-" to make it happen before the transaction summary
1961 # reports registered via scmutil.registersummarycallback() whose names
2010 # reports registered via scmutil.registersummarycallback() whose names
1962 # are 00-txnreport etc. That way, the caches will be warm when the
2011 # are 00-txnreport etc. That way, the caches will be warm when the
1963 # callbacks run.
2012 # callbacks run.
1964 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
2013 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1965 def txnaborthook(tr2):
2014 def txnaborthook(tr2):
1966 """To be run if transaction is aborted
2015 """To be run if transaction is aborted
1967 """
2016 """
1968 reporef().hook('txnabort', throw=False,
2017 reporef().hook('txnabort', throw=False,
1969 **pycompat.strkwargs(tr2.hookargs))
2018 **pycompat.strkwargs(tr2.hookargs))
1970 tr.addabort('txnabort-hook', txnaborthook)
2019 tr.addabort('txnabort-hook', txnaborthook)
1971 # avoid eager cache invalidation. in-memory data should be identical
2020 # avoid eager cache invalidation. in-memory data should be identical
1972 # to stored data if transaction has no error.
2021 # to stored data if transaction has no error.
1973 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
2022 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1974 self._transref = weakref.ref(tr)
2023 self._transref = weakref.ref(tr)
1975 scmutil.registersummarycallback(self, tr, desc)
2024 scmutil.registersummarycallback(self, tr, desc)
1976 return tr
2025 return tr
1977
2026
1978 def _journalfiles(self):
2027 def _journalfiles(self):
1979 return ((self.svfs, 'journal'),
2028 return ((self.svfs, 'journal'),
1980 (self.svfs, 'journal.narrowspec'),
2029 (self.svfs, 'journal.narrowspec'),
1981 (self.vfs, 'journal.narrowspec.dirstate'),
2030 (self.vfs, 'journal.narrowspec.dirstate'),
1982 (self.vfs, 'journal.dirstate'),
2031 (self.vfs, 'journal.dirstate'),
1983 (self.vfs, 'journal.branch'),
2032 (self.vfs, 'journal.branch'),
1984 (self.vfs, 'journal.desc'),
2033 (self.vfs, 'journal.desc'),
1985 (self.vfs, 'journal.bookmarks'),
2034 (self.vfs, 'journal.bookmarks'),
1986 (self.svfs, 'journal.phaseroots'))
2035 (self.svfs, 'journal.phaseroots'))
1987
2036
1988 def undofiles(self):
2037 def undofiles(self):
1989 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2038 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1990
2039
1991 @unfilteredmethod
2040 @unfilteredmethod
1992 def _writejournal(self, desc):
2041 def _writejournal(self, desc):
1993 self.dirstate.savebackup(None, 'journal.dirstate')
2042 self.dirstate.savebackup(None, 'journal.dirstate')
1994 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
2043 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
1995 narrowspec.savebackup(self, 'journal.narrowspec')
2044 narrowspec.savebackup(self, 'journal.narrowspec')
1996 self.vfs.write("journal.branch",
2045 self.vfs.write("journal.branch",
1997 encoding.fromlocal(self.dirstate.branch()))
2046 encoding.fromlocal(self.dirstate.branch()))
1998 self.vfs.write("journal.desc",
2047 self.vfs.write("journal.desc",
1999 "%d\n%s\n" % (len(self), desc))
2048 "%d\n%s\n" % (len(self), desc))
2000 self.vfs.write("journal.bookmarks",
2049 self.vfs.write("journal.bookmarks",
2001 self.vfs.tryread("bookmarks"))
2050 self.vfs.tryread("bookmarks"))
2002 self.svfs.write("journal.phaseroots",
2051 self.svfs.write("journal.phaseroots",
2003 self.svfs.tryread("phaseroots"))
2052 self.svfs.tryread("phaseroots"))
2004
2053
2005 def recover(self):
2054 def recover(self):
2006 with self.lock():
2055 with self.lock():
2007 if self.svfs.exists("journal"):
2056 if self.svfs.exists("journal"):
2008 self.ui.status(_("rolling back interrupted transaction\n"))
2057 self.ui.status(_("rolling back interrupted transaction\n"))
2009 vfsmap = {'': self.svfs,
2058 vfsmap = {'': self.svfs,
2010 'plain': self.vfs,}
2059 'plain': self.vfs,}
2011 transaction.rollback(self.svfs, vfsmap, "journal",
2060 transaction.rollback(self.svfs, vfsmap, "journal",
2012 self.ui.warn,
2061 self.ui.warn,
2013 checkambigfiles=_cachedfiles)
2062 checkambigfiles=_cachedfiles)
2014 self.invalidate()
2063 self.invalidate()
2015 return True
2064 return True
2016 else:
2065 else:
2017 self.ui.warn(_("no interrupted transaction available\n"))
2066 self.ui.warn(_("no interrupted transaction available\n"))
2018 return False
2067 return False
2019
2068
2020 def rollback(self, dryrun=False, force=False):
2069 def rollback(self, dryrun=False, force=False):
2021 wlock = lock = dsguard = None
2070 wlock = lock = dsguard = None
2022 try:
2071 try:
2023 wlock = self.wlock()
2072 wlock = self.wlock()
2024 lock = self.lock()
2073 lock = self.lock()
2025 if self.svfs.exists("undo"):
2074 if self.svfs.exists("undo"):
2026 dsguard = dirstateguard.dirstateguard(self, 'rollback')
2075 dsguard = dirstateguard.dirstateguard(self, 'rollback')
2027
2076
2028 return self._rollback(dryrun, force, dsguard)
2077 return self._rollback(dryrun, force, dsguard)
2029 else:
2078 else:
2030 self.ui.warn(_("no rollback information available\n"))
2079 self.ui.warn(_("no rollback information available\n"))
2031 return 1
2080 return 1
2032 finally:
2081 finally:
2033 release(dsguard, lock, wlock)
2082 release(dsguard, lock, wlock)
2034
2083
2035 @unfilteredmethod # Until we get smarter cache management
2084 @unfilteredmethod # Until we get smarter cache management
2036 def _rollback(self, dryrun, force, dsguard):
2085 def _rollback(self, dryrun, force, dsguard):
2037 ui = self.ui
2086 ui = self.ui
2038 try:
2087 try:
2039 args = self.vfs.read('undo.desc').splitlines()
2088 args = self.vfs.read('undo.desc').splitlines()
2040 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2089 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2041 if len(args) >= 3:
2090 if len(args) >= 3:
2042 detail = args[2]
2091 detail = args[2]
2043 oldtip = oldlen - 1
2092 oldtip = oldlen - 1
2044
2093
2045 if detail and ui.verbose:
2094 if detail and ui.verbose:
2046 msg = (_('repository tip rolled back to revision %d'
2095 msg = (_('repository tip rolled back to revision %d'
2047 ' (undo %s: %s)\n')
2096 ' (undo %s: %s)\n')
2048 % (oldtip, desc, detail))
2097 % (oldtip, desc, detail))
2049 else:
2098 else:
2050 msg = (_('repository tip rolled back to revision %d'
2099 msg = (_('repository tip rolled back to revision %d'
2051 ' (undo %s)\n')
2100 ' (undo %s)\n')
2052 % (oldtip, desc))
2101 % (oldtip, desc))
2053 except IOError:
2102 except IOError:
2054 msg = _('rolling back unknown transaction\n')
2103 msg = _('rolling back unknown transaction\n')
2055 desc = None
2104 desc = None
2056
2105
2057 if not force and self['.'] != self['tip'] and desc == 'commit':
2106 if not force and self['.'] != self['tip'] and desc == 'commit':
2058 raise error.Abort(
2107 raise error.Abort(
2059 _('rollback of last commit while not checked out '
2108 _('rollback of last commit while not checked out '
2060 'may lose data'), hint=_('use -f to force'))
2109 'may lose data'), hint=_('use -f to force'))
2061
2110
2062 ui.status(msg)
2111 ui.status(msg)
2063 if dryrun:
2112 if dryrun:
2064 return 0
2113 return 0
2065
2114
2066 parents = self.dirstate.parents()
2115 parents = self.dirstate.parents()
2067 self.destroying()
2116 self.destroying()
2068 vfsmap = {'plain': self.vfs, '': self.svfs}
2117 vfsmap = {'plain': self.vfs, '': self.svfs}
2069 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2118 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2070 checkambigfiles=_cachedfiles)
2119 checkambigfiles=_cachedfiles)
2071 if self.vfs.exists('undo.bookmarks'):
2120 if self.vfs.exists('undo.bookmarks'):
2072 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2121 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2073 if self.svfs.exists('undo.phaseroots'):
2122 if self.svfs.exists('undo.phaseroots'):
2074 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2123 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2075 self.invalidate()
2124 self.invalidate()
2076
2125
2077 parentgone = any(p not in self.changelog.nodemap for p in parents)
2126 parentgone = any(p not in self.changelog.nodemap for p in parents)
2078 if parentgone:
2127 if parentgone:
2079 # prevent dirstateguard from overwriting already restored one
2128 # prevent dirstateguard from overwriting already restored one
2080 dsguard.close()
2129 dsguard.close()
2081
2130
2082 narrowspec.restorebackup(self, 'undo.narrowspec')
2131 narrowspec.restorebackup(self, 'undo.narrowspec')
2083 narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
2132 narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
2084 self.dirstate.restorebackup(None, 'undo.dirstate')
2133 self.dirstate.restorebackup(None, 'undo.dirstate')
2085 try:
2134 try:
2086 branch = self.vfs.read('undo.branch')
2135 branch = self.vfs.read('undo.branch')
2087 self.dirstate.setbranch(encoding.tolocal(branch))
2136 self.dirstate.setbranch(encoding.tolocal(branch))
2088 except IOError:
2137 except IOError:
2089 ui.warn(_('named branch could not be reset: '
2138 ui.warn(_('named branch could not be reset: '
2090 'current branch is still \'%s\'\n')
2139 'current branch is still \'%s\'\n')
2091 % self.dirstate.branch())
2140 % self.dirstate.branch())
2092
2141
2093 parents = tuple([p.rev() for p in self[None].parents()])
2142 parents = tuple([p.rev() for p in self[None].parents()])
2094 if len(parents) > 1:
2143 if len(parents) > 1:
2095 ui.status(_('working directory now based on '
2144 ui.status(_('working directory now based on '
2096 'revisions %d and %d\n') % parents)
2145 'revisions %d and %d\n') % parents)
2097 else:
2146 else:
2098 ui.status(_('working directory now based on '
2147 ui.status(_('working directory now based on '
2099 'revision %d\n') % parents)
2148 'revision %d\n') % parents)
2100 mergemod.mergestate.clean(self, self['.'].node())
2149 mergemod.mergestate.clean(self, self['.'].node())
2101
2150
2102 # TODO: if we know which new heads may result from this rollback, pass
2151 # TODO: if we know which new heads may result from this rollback, pass
2103 # them to destroy(), which will prevent the branchhead cache from being
2152 # them to destroy(), which will prevent the branchhead cache from being
2104 # invalidated.
2153 # invalidated.
2105 self.destroyed()
2154 self.destroyed()
2106 return 0
2155 return 0
2107
2156
2108 def _buildcacheupdater(self, newtransaction):
2157 def _buildcacheupdater(self, newtransaction):
2109 """called during transaction to build the callback updating cache
2158 """called during transaction to build the callback updating cache
2110
2159
2111 Lives on the repository to help extensions that might want to augment
2160 Lives on the repository to help extensions that might want to augment
2112 this logic. For this purpose, the created transaction is passed to the
2161 this logic. For this purpose, the created transaction is passed to the
2113 method.
2162 method.
2114 """
2163 """
2115 # we must avoid cyclic reference between repo and transaction.
2164 # we must avoid cyclic reference between repo and transaction.
2116 reporef = weakref.ref(self)
2165 reporef = weakref.ref(self)
2117 def updater(tr):
2166 def updater(tr):
2118 repo = reporef()
2167 repo = reporef()
2119 repo.updatecaches(tr)
2168 repo.updatecaches(tr)
2120 return updater
2169 return updater
2121
2170
2122 @unfilteredmethod
2171 @unfilteredmethod
2123 def updatecaches(self, tr=None, full=False):
2172 def updatecaches(self, tr=None, full=False):
2124 """warm appropriate caches
2173 """warm appropriate caches
2125
2174
2126 If this function is called after a transaction closed, the transaction
2175 If this function is called after a transaction closed, the transaction
2127 will be available in the 'tr' argument. This can be used to selectively
2176 will be available in the 'tr' argument. This can be used to selectively
2128 update caches relevant to the changes in that transaction.
2177 update caches relevant to the changes in that transaction.
2129
2178
2130 If 'full' is set, make sure all caches the function knows about have
2179 If 'full' is set, make sure all caches the function knows about have
2131 up-to-date data. Even the ones usually loaded more lazily.
2180 up-to-date data. Even the ones usually loaded more lazily.
2132 """
2181 """
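# An illustrative usage sketch (assumption: this mirrors what the
# 'hg debugupdatecaches' debug command does) to warm everything eagerly,
# e.g. after a large pull::
#
#     with repo.wlock(), repo.lock():
#         repo.updatecaches(full=True)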
2133 if tr is not None and tr.hookargs.get('source') == 'strip':
2182 if tr is not None and tr.hookargs.get('source') == 'strip':
2134 # During strip, many caches are invalid but a
2183 # During strip, many caches are invalid but a
2135 # later call to `destroyed` will refresh them.
2184 # later call to `destroyed` will refresh them.
2136 return
2185 return
2137
2186
2138 if tr is None or tr.changes['origrepolen'] < len(self):
2187 if tr is None or tr.changes['origrepolen'] < len(self):
2139 # accessing the 'served' branchmap should refresh all the others,
2188 # accessing the 'served' branchmap should refresh all the others,
2140 self.ui.debug('updating the branch cache\n')
2189 self.ui.debug('updating the branch cache\n')
2141 self.filtered('served').branchmap()
2190 self.filtered('served').branchmap()
2142 self.filtered('served.hidden').branchmap()
2191 self.filtered('served.hidden').branchmap()
2143
2192
2144 if full:
2193 if full:
2145 unfi = self.unfiltered()
2194 unfi = self.unfiltered()
2146 rbc = unfi.revbranchcache()
2195 rbc = unfi.revbranchcache()
2147 for r in unfi.changelog:
2196 for r in unfi.changelog:
2148 rbc.branchinfo(r)
2197 rbc.branchinfo(r)
2149 rbc.write()
2198 rbc.write()
2150
2199
2151 # ensure the working copy parents are in the manifestfulltextcache
2200 # ensure the working copy parents are in the manifestfulltextcache
2152 for ctx in self['.'].parents():
2201 for ctx in self['.'].parents():
2153 ctx.manifest() # accessing the manifest is enough
2202 ctx.manifest() # accessing the manifest is enough
2154
2203
2155 # accessing tags warms the cache
2204 # accessing tags warms the cache
2156 self.tags()
2205 self.tags()
2157 self.filtered('served').tags()
2206 self.filtered('served').tags()
2158
2207
2159 def invalidatecaches(self):
2208 def invalidatecaches(self):
2160
2209
2161 if r'_tagscache' in vars(self):
2210 if r'_tagscache' in vars(self):
2162 # can't use delattr on proxy
2211 # can't use delattr on proxy
2163 del self.__dict__[r'_tagscache']
2212 del self.__dict__[r'_tagscache']
2164
2213
2165 self._branchcaches.clear()
2214 self._branchcaches.clear()
2166 self.invalidatevolatilesets()
2215 self.invalidatevolatilesets()
2167 self._sparsesignaturecache.clear()
2216 self._sparsesignaturecache.clear()
2168
2217
2169 def invalidatevolatilesets(self):
2218 def invalidatevolatilesets(self):
2170 self.filteredrevcache.clear()
2219 self.filteredrevcache.clear()
2171 obsolete.clearobscaches(self)
2220 obsolete.clearobscaches(self)
2172
2221
2173 def invalidatedirstate(self):
2222 def invalidatedirstate(self):
2174 '''Invalidates the dirstate, causing the next call to dirstate
2223 '''Invalidates the dirstate, causing the next call to dirstate
2175 to check if it was modified since the last time it was read,
2224 to check if it was modified since the last time it was read,
2176 rereading it if it has.
2225 rereading it if it has.
2177
2226
2178 This is different from dirstate.invalidate() in that it doesn't always
2227 This is different from dirstate.invalidate() in that it doesn't always
2179 reread the dirstate. Use dirstate.invalidate() if you want to
2228 reread the dirstate. Use dirstate.invalidate() if you want to
2180 explicitly read the dirstate again (i.e. restoring it to a previous
2229 explicitly read the dirstate again (i.e. restoring it to a previous
2181 known good state).'''
2230 known good state).'''
2182 if hasunfilteredcache(self, r'dirstate'):
2231 if hasunfilteredcache(self, r'dirstate'):
2183 for k in self.dirstate._filecache:
2232 for k in self.dirstate._filecache:
2184 try:
2233 try:
2185 delattr(self.dirstate, k)
2234 delattr(self.dirstate, k)
2186 except AttributeError:
2235 except AttributeError:
2187 pass
2236 pass
2188 delattr(self.unfiltered(), r'dirstate')
2237 delattr(self.unfiltered(), r'dirstate')
2189
2238
2190 def invalidate(self, clearfilecache=False):
2239 def invalidate(self, clearfilecache=False):
2191 '''Invalidates both store and non-store parts other than dirstate
2240 '''Invalidates both store and non-store parts other than dirstate
2192
2241
2193 If a transaction is running, invalidation of store is omitted,
2242 If a transaction is running, invalidation of store is omitted,
2194 because discarding in-memory changes might cause inconsistency
2243 because discarding in-memory changes might cause inconsistency
2195 (e.g. an incomplete fncache causes unintentional failure, but a
2244 (e.g. an incomplete fncache causes unintentional failure, but a
2196 redundant one doesn't).
2245 redundant one doesn't).
2197 '''
2246 '''
2198 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2247 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2199 for k in list(self._filecache.keys()):
2248 for k in list(self._filecache.keys()):
2200 # dirstate is invalidated separately in invalidatedirstate()
2249 # dirstate is invalidated separately in invalidatedirstate()
2201 if k == 'dirstate':
2250 if k == 'dirstate':
2202 continue
2251 continue
2203 if (k == 'changelog' and
2252 if (k == 'changelog' and
2204 self.currenttransaction() and
2253 self.currenttransaction() and
2205 self.changelog._delayed):
2254 self.changelog._delayed):
2206 # The changelog object may store unwritten revisions. We don't
2255 # The changelog object may store unwritten revisions. We don't
2207 # want to lose them.
2256 # want to lose them.
2208 # TODO: Solve the problem instead of working around it.
2257 # TODO: Solve the problem instead of working around it.
2209 continue
2258 continue
2210
2259
2211 if clearfilecache:
2260 if clearfilecache:
2212 del self._filecache[k]
2261 del self._filecache[k]
2213 try:
2262 try:
2214 delattr(unfiltered, k)
2263 delattr(unfiltered, k)
2215 except AttributeError:
2264 except AttributeError:
2216 pass
2265 pass
2217 self.invalidatecaches()
2266 self.invalidatecaches()
2218 if not self.currenttransaction():
2267 if not self.currenttransaction():
2219 # TODO: Changing contents of store outside transaction
2268 # TODO: Changing contents of store outside transaction
2220 # causes inconsistency. We should make in-memory store
2269 # causes inconsistency. We should make in-memory store
2221 # changes detectable, and abort if changed.
2270 # changes detectable, and abort if changed.
2222 self.store.invalidatecaches()
2271 self.store.invalidatecaches()
2223
2272
2224 def invalidateall(self):
2273 def invalidateall(self):
2225 '''Fully invalidates both store and non-store parts, causing the
2274 '''Fully invalidates both store and non-store parts, causing the
2226 subsequent operation to reread any outside changes.'''
2275 subsequent operation to reread any outside changes.'''
2227 # extensions should hook this to invalidate their caches
2276 # extensions should hook this to invalidate their caches
2228 self.invalidate()
2277 self.invalidate()
2229 self.invalidatedirstate()
2278 self.invalidatedirstate()
2230
2279
2231 @unfilteredmethod
2280 @unfilteredmethod
2232 def _refreshfilecachestats(self, tr):
2281 def _refreshfilecachestats(self, tr):
2233 """Reload stats of cached files so that they are flagged as valid"""
2282 """Reload stats of cached files so that they are flagged as valid"""
2234 for k, ce in self._filecache.items():
2283 for k, ce in self._filecache.items():
2235 k = pycompat.sysstr(k)
2284 k = pycompat.sysstr(k)
2236 if k == r'dirstate' or k not in self.__dict__:
2285 if k == r'dirstate' or k not in self.__dict__:
2237 continue
2286 continue
2238 ce.refresh()
2287 ce.refresh()
2239
2288
2240 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2289 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2241 inheritchecker=None, parentenvvar=None):
2290 inheritchecker=None, parentenvvar=None):
2242 parentlock = None
2291 parentlock = None
2243 # the contents of parentenvvar are used by the underlying lock to
2292 # the contents of parentenvvar are used by the underlying lock to
2244 # determine whether it can be inherited
2293 # determine whether it can be inherited
2245 if parentenvvar is not None:
2294 if parentenvvar is not None:
2246 parentlock = encoding.environ.get(parentenvvar)
2295 parentlock = encoding.environ.get(parentenvvar)
2247
2296
2248 timeout = 0
2297 timeout = 0
2249 warntimeout = 0
2298 warntimeout = 0
2250 if wait:
2299 if wait:
2251 timeout = self.ui.configint("ui", "timeout")
2300 timeout = self.ui.configint("ui", "timeout")
2252 warntimeout = self.ui.configint("ui", "timeout.warn")
2301 warntimeout = self.ui.configint("ui", "timeout.warn")
2253 # internal config: ui.signal-safe-lock
2302 # internal config: ui.signal-safe-lock
2254 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2303 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2255
2304
2256 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2305 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2257 releasefn=releasefn,
2306 releasefn=releasefn,
2258 acquirefn=acquirefn, desc=desc,
2307 acquirefn=acquirefn, desc=desc,
2259 inheritchecker=inheritchecker,
2308 inheritchecker=inheritchecker,
2260 parentlock=parentlock,
2309 parentlock=parentlock,
2261 signalsafe=signalsafe)
2310 signalsafe=signalsafe)
2262 return l
2311 return l
2263
2312
2264 def _afterlock(self, callback):
2313 def _afterlock(self, callback):
2265 """add a callback to be run when the repository is fully unlocked
2314 """add a callback to be run when the repository is fully unlocked
2266
2315
2267 The callback will be executed when the outermost lock is released
2316 The callback will be executed when the outermost lock is released
2268 (with wlock being higher level than 'lock')."""
2317 (with wlock being higher level than 'lock')."""
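# A small illustrative sketch (hypothetical callback, assuming a 'repo'
# instance): the callback runs once every lock is gone, or immediately
# when no lock is currently held::
#
#     def _notifyunlocked():
#         repo.ui.debug('all repository locks released\n')
#     repo._afterlock(_notifyunlocked)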
2269 for ref in (self._wlockref, self._lockref):
2318 for ref in (self._wlockref, self._lockref):
2270 l = ref and ref()
2319 l = ref and ref()
2271 if l and l.held:
2320 if l and l.held:
2272 l.postrelease.append(callback)
2321 l.postrelease.append(callback)
2273 break
2322 break
2274 else: # no lock has been found.
2323 else: # no lock has been found.
2275 callback()
2324 callback()
2276
2325
2277 def lock(self, wait=True):
2326 def lock(self, wait=True):
2278 '''Lock the repository store (.hg/store) and return a weak reference
2327 '''Lock the repository store (.hg/store) and return a weak reference
2279 to the lock. Use this before modifying the store (e.g. committing or
2328 to the lock. Use this before modifying the store (e.g. committing or
2280 stripping). If you are opening a transaction, get a lock as well.
2329 stripping). If you are opening a transaction, get a lock as well.
2281
2330
2282 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2331 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2283 'wlock' first to avoid a deadlock hazard.'''
2332 'wlock' first to avoid a deadlock hazard.'''
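# A minimal usage sketch (illustrative only): callers needing both locks
# take 'wlock' before 'lock', typically together with a transaction::
#
#     with repo.wlock(), repo.lock(), repo.transaction('example') as tr:
#         ...  # mutate the store here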
2284 l = self._currentlock(self._lockref)
2333 l = self._currentlock(self._lockref)
2285 if l is not None:
2334 if l is not None:
2286 l.lock()
2335 l.lock()
2287 return l
2336 return l
2288
2337
2289 l = self._lock(vfs=self.svfs,
2338 l = self._lock(vfs=self.svfs,
2290 lockname="lock",
2339 lockname="lock",
2291 wait=wait,
2340 wait=wait,
2292 releasefn=None,
2341 releasefn=None,
2293 acquirefn=self.invalidate,
2342 acquirefn=self.invalidate,
2294 desc=_('repository %s') % self.origroot)
2343 desc=_('repository %s') % self.origroot)
2295 self._lockref = weakref.ref(l)
2344 self._lockref = weakref.ref(l)
2296 return l
2345 return l
2297
2346
2298 def _wlockchecktransaction(self):
2347 def _wlockchecktransaction(self):
2299 if self.currenttransaction() is not None:
2348 if self.currenttransaction() is not None:
2300 raise error.LockInheritanceContractViolation(
2349 raise error.LockInheritanceContractViolation(
2301 'wlock cannot be inherited in the middle of a transaction')
2350 'wlock cannot be inherited in the middle of a transaction')
2302
2351
2303 def wlock(self, wait=True):
2352 def wlock(self, wait=True):
2304 '''Lock the non-store parts of the repository (everything under
2353 '''Lock the non-store parts of the repository (everything under
2305 .hg except .hg/store) and return a weak reference to the lock.
2354 .hg except .hg/store) and return a weak reference to the lock.
2306
2355
2307 Use this before modifying files in .hg.
2356 Use this before modifying files in .hg.
2308
2357
2309 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2358 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2310 'wlock' first to avoid a deadlock hazard.'''
2359 'wlock' first to avoid a deadlock hazard.'''
2311 l = self._wlockref and self._wlockref()
2360 l = self._wlockref and self._wlockref()
2312 if l is not None and l.held:
2361 if l is not None and l.held:
2313 l.lock()
2362 l.lock()
2314 return l
2363 return l
2315
2364
2316 # We do not need to check for non-waiting lock acquisition. Such
2365 # We do not need to check for non-waiting lock acquisition. Such
2317 # acquisition would not cause a deadlock as it would just fail.
2366 # acquisition would not cause a deadlock as it would just fail.
2318 if wait and (self.ui.configbool('devel', 'all-warnings')
2367 if wait and (self.ui.configbool('devel', 'all-warnings')
2319 or self.ui.configbool('devel', 'check-locks')):
2368 or self.ui.configbool('devel', 'check-locks')):
2320 if self._currentlock(self._lockref) is not None:
2369 if self._currentlock(self._lockref) is not None:
2321 self.ui.develwarn('"wlock" acquired after "lock"')
2370 self.ui.develwarn('"wlock" acquired after "lock"')
2322
2371
2323 def unlock():
2372 def unlock():
2324 if self.dirstate.pendingparentchange():
2373 if self.dirstate.pendingparentchange():
2325 self.dirstate.invalidate()
2374 self.dirstate.invalidate()
2326 else:
2375 else:
2327 self.dirstate.write(None)
2376 self.dirstate.write(None)
2328
2377
2329 self._filecache['dirstate'].refresh()
2378 self._filecache['dirstate'].refresh()
2330
2379
2331 l = self._lock(self.vfs, "wlock", wait, unlock,
2380 l = self._lock(self.vfs, "wlock", wait, unlock,
2332 self.invalidatedirstate, _('working directory of %s') %
2381 self.invalidatedirstate, _('working directory of %s') %
2333 self.origroot,
2382 self.origroot,
2334 inheritchecker=self._wlockchecktransaction,
2383 inheritchecker=self._wlockchecktransaction,
2335 parentenvvar='HG_WLOCK_LOCKER')
2384 parentenvvar='HG_WLOCK_LOCKER')
2336 self._wlockref = weakref.ref(l)
2385 self._wlockref = weakref.ref(l)
2337 return l
2386 return l
2338
2387
2339 def _currentlock(self, lockref):
2388 def _currentlock(self, lockref):
2340 """Returns the lock if it's held, or None if it's not."""
2389 """Returns the lock if it's held, or None if it's not."""
2341 if lockref is None:
2390 if lockref is None:
2342 return None
2391 return None
2343 l = lockref()
2392 l = lockref()
2344 if l is None or not l.held:
2393 if l is None or not l.held:
2345 return None
2394 return None
2346 return l
2395 return l
2347
2396
2348 def currentwlock(self):
2397 def currentwlock(self):
2349 """Returns the wlock if it's held, or None if it's not."""
2398 """Returns the wlock if it's held, or None if it's not."""
2350 return self._currentlock(self._wlockref)
2399 return self._currentlock(self._wlockref)
2351
2400
2352 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
2401 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
2353 includecopymeta):
2402 includecopymeta):
2354 """
2403 """
2355 commit an individual file as part of a larger transaction
2404 commit an individual file as part of a larger transaction
2356 """
2405 """
2357
2406
2358 fname = fctx.path()
2407 fname = fctx.path()
2359 fparent1 = manifest1.get(fname, nullid)
2408 fparent1 = manifest1.get(fname, nullid)
2360 fparent2 = manifest2.get(fname, nullid)
2409 fparent2 = manifest2.get(fname, nullid)
2361 if isinstance(fctx, context.filectx):
2410 if isinstance(fctx, context.filectx):
2362 node = fctx.filenode()
2411 node = fctx.filenode()
2363 if node in [fparent1, fparent2]:
2412 if node in [fparent1, fparent2]:
2364 self.ui.debug('reusing %s filelog entry\n' % fname)
2413 self.ui.debug('reusing %s filelog entry\n' % fname)
2365 if manifest1.flags(fname) != fctx.flags():
2414 if manifest1.flags(fname) != fctx.flags():
2366 changelist.append(fname)
2415 changelist.append(fname)
2367 return node
2416 return node
2368
2417
2369 flog = self.file(fname)
2418 flog = self.file(fname)
2370 meta = {}
2419 meta = {}
2371 cfname = fctx.copysource()
2420 cfname = fctx.copysource()
2372 if cfname and cfname != fname:
2421 if cfname and cfname != fname:
2373 # Mark the new revision of this file as a copy of another
2422 # Mark the new revision of this file as a copy of another
2374 # file. This copy data will effectively act as a parent
2423 # file. This copy data will effectively act as a parent
2375 # of this new revision. If this is a merge, the first
2424 # of this new revision. If this is a merge, the first
2376 # parent will be the nullid (meaning "look up the copy data")
2425 # parent will be the nullid (meaning "look up the copy data")
2377 # and the second one will be the other parent. For example:
2426 # and the second one will be the other parent. For example:
2378 #
2427 #
2379 # 0 --- 1 --- 3 rev1 changes file foo
2428 # 0 --- 1 --- 3 rev1 changes file foo
2380 # \ / rev2 renames foo to bar and changes it
2429 # \ / rev2 renames foo to bar and changes it
2381 # \- 2 -/ rev3 should have bar with all changes and
2430 # \- 2 -/ rev3 should have bar with all changes and
2382 # should record that bar descends from
2431 # should record that bar descends from
2383 # bar in rev2 and foo in rev1
2432 # bar in rev2 and foo in rev1
2384 #
2433 #
2385 # this allows this merge to succeed:
2434 # this allows this merge to succeed:
2386 #
2435 #
2387 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2436 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2388 # \ / merging rev3 and rev4 should use bar@rev2
2437 # \ / merging rev3 and rev4 should use bar@rev2
2389 # \- 2 --- 4 as the merge base
2438 # \- 2 --- 4 as the merge base
2390 #
2439 #
2391
2440
2392 cnode = manifest1.get(cfname)
2441 cnode = manifest1.get(cfname)
2393 newfparent = fparent2
2442 newfparent = fparent2
2394
2443
2395 if manifest2: # branch merge
2444 if manifest2: # branch merge
2396 if fparent2 == nullid or cnode is None: # copied on remote side
2445 if fparent2 == nullid or cnode is None: # copied on remote side
2397 if cfname in manifest2:
2446 if cfname in manifest2:
2398 cnode = manifest2[cfname]
2447 cnode = manifest2[cfname]
2399 newfparent = fparent1
2448 newfparent = fparent1
2400
2449
2401 # Here, we used to search backwards through history to try to find
2450 # Here, we used to search backwards through history to try to find
2402 # where the file copy came from if the source of a copy was not in
2451 # where the file copy came from if the source of a copy was not in
2403 # the parent directory. However, this doesn't actually make sense to
2452 # the parent directory. However, this doesn't actually make sense to
2404 # do (what does a copy from something not in your working copy even
2453 # do (what does a copy from something not in your working copy even
2405 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2454 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2406 # the user that copy information was dropped, so if they didn't
2455 # the user that copy information was dropped, so if they didn't
2407 # expect this outcome it can be fixed, but this is the correct
2456 # expect this outcome it can be fixed, but this is the correct
2408 # behavior in this circumstance.
2457 # behavior in this circumstance.
2409
2458
2410 if cnode:
2459 if cnode:
2411 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
2460 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
2412 if includecopymeta:
2461 if includecopymeta:
2413 meta["copy"] = cfname
2462 meta["copy"] = cfname
2414 meta["copyrev"] = hex(cnode)
2463 meta["copyrev"] = hex(cnode)
2415 fparent1, fparent2 = nullid, newfparent
2464 fparent1, fparent2 = nullid, newfparent
2416 else:
2465 else:
2417 self.ui.warn(_("warning: can't find ancestor for '%s' "
2466 self.ui.warn(_("warning: can't find ancestor for '%s' "
2418 "copied from '%s'!\n") % (fname, cfname))
2467 "copied from '%s'!\n") % (fname, cfname))
2419
2468
2420 elif fparent1 == nullid:
2469 elif fparent1 == nullid:
2421 fparent1, fparent2 = fparent2, nullid
2470 fparent1, fparent2 = fparent2, nullid
2422 elif fparent2 != nullid:
2471 elif fparent2 != nullid:
2423 # is one parent an ancestor of the other?
2472 # is one parent an ancestor of the other?
2424 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2473 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2425 if fparent1 in fparentancestors:
2474 if fparent1 in fparentancestors:
2426 fparent1, fparent2 = fparent2, nullid
2475 fparent1, fparent2 = fparent2, nullid
2427 elif fparent2 in fparentancestors:
2476 elif fparent2 in fparentancestors:
2428 fparent2 = nullid
2477 fparent2 = nullid
2429
2478
2430 # is the file changed?
2479 # is the file changed?
2431 text = fctx.data()
2480 text = fctx.data()
2432 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2481 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2433 changelist.append(fname)
2482 changelist.append(fname)
2434 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2483 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2435 # are just the flags changed during merge?
2484 # are just the flags changed during merge?
2436 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2485 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2437 changelist.append(fname)
2486 changelist.append(fname)
2438
2487
2439 return fparent1
2488 return fparent1
2440
2489
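# Illustrative sketch (not part of localrepo.py): reading back the copy
# metadata that _filecommit() above stores in a filelog. The helper name and
# the path b'bar' are hypothetical and only serve as an example.
def _example_show_copy_source(repo, path=b'bar'):
    flog = repo.file(path)
    if not len(flog):
        return None
    fnode = flog.node(len(flog) - 1)   # most recent revision of the file
    copied = flog.renamed(fnode)       # (source path, source filenode) or False
    if copied:
        repo.ui.write(b"%s was copied from %s\n" % (path, copied[0]))
    return copied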
2441 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2490 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2442 """check for commit arguments that aren't committable"""
2491 """check for commit arguments that aren't committable"""
2443 if match.isexact() or match.prefix():
2492 if match.isexact() or match.prefix():
2444 matched = set(status.modified + status.added + status.removed)
2493 matched = set(status.modified + status.added + status.removed)
2445
2494
2446 for f in match.files():
2495 for f in match.files():
2447 f = self.dirstate.normalize(f)
2496 f = self.dirstate.normalize(f)
2448 if f == '.' or f in matched or f in wctx.substate:
2497 if f == '.' or f in matched or f in wctx.substate:
2449 continue
2498 continue
2450 if f in status.deleted:
2499 if f in status.deleted:
2451 fail(f, _('file not found!'))
2500 fail(f, _('file not found!'))
2452 if f in vdirs: # visited directory
2501 if f in vdirs: # visited directory
2453 d = f + '/'
2502 d = f + '/'
2454 for mf in matched:
2503 for mf in matched:
2455 if mf.startswith(d):
2504 if mf.startswith(d):
2456 break
2505 break
2457 else:
2506 else:
2458 fail(f, _("no match under directory!"))
2507 fail(f, _("no match under directory!"))
2459 elif f not in self.dirstate:
2508 elif f not in self.dirstate:
2460 fail(f, _("file not tracked!"))
2509 fail(f, _("file not tracked!"))
2461
2510
2462 @unfilteredmethod
2511 @unfilteredmethod
2463 def commit(self, text="", user=None, date=None, match=None, force=False,
2512 def commit(self, text="", user=None, date=None, match=None, force=False,
2464 editor=False, extra=None):
2513 editor=False, extra=None):
2465 """Add a new revision to current repository.
2514 """Add a new revision to current repository.
2466
2515
2467 Revision information is gathered from the working directory,
2516 Revision information is gathered from the working directory,
2468 match can be used to filter the committed files. If editor is
2517 match can be used to filter the committed files. If editor is
2469 supplied, it is called to get a commit message.
2518 supplied, it is called to get a commit message.
2470 """
2519 """
2471 if extra is None:
2520 if extra is None:
2472 extra = {}
2521 extra = {}
2473
2522
2474 def fail(f, msg):
2523 def fail(f, msg):
2475 raise error.Abort('%s: %s' % (f, msg))
2524 raise error.Abort('%s: %s' % (f, msg))
2476
2525
2477 if not match:
2526 if not match:
2478 match = matchmod.always()
2527 match = matchmod.always()
2479
2528
2480 if not force:
2529 if not force:
2481 vdirs = []
2530 vdirs = []
2482 match.explicitdir = vdirs.append
2531 match.explicitdir = vdirs.append
2483 match.bad = fail
2532 match.bad = fail
2484
2533
2485 # lock() for recent changelog (see issue4368)
2534 # lock() for recent changelog (see issue4368)
2486 with self.wlock(), self.lock():
2535 with self.wlock(), self.lock():
2487 wctx = self[None]
2536 wctx = self[None]
2488 merge = len(wctx.parents()) > 1
2537 merge = len(wctx.parents()) > 1
2489
2538
2490 if not force and merge and not match.always():
2539 if not force and merge and not match.always():
2491 raise error.Abort(_('cannot partially commit a merge '
2540 raise error.Abort(_('cannot partially commit a merge '
2492 '(do not specify files or patterns)'))
2541 '(do not specify files or patterns)'))
2493
2542
2494 status = self.status(match=match, clean=force)
2543 status = self.status(match=match, clean=force)
2495 if force:
2544 if force:
2496 status.modified.extend(status.clean) # mq may commit clean files
2545 status.modified.extend(status.clean) # mq may commit clean files
2497
2546
2498 # check subrepos
2547 # check subrepos
2499 subs, commitsubs, newstate = subrepoutil.precommit(
2548 subs, commitsubs, newstate = subrepoutil.precommit(
2500 self.ui, wctx, status, match, force=force)
2549 self.ui, wctx, status, match, force=force)
2501
2550
2502 # make sure all explicit patterns are matched
2551 # make sure all explicit patterns are matched
2503 if not force:
2552 if not force:
2504 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2553 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2505
2554
2506 cctx = context.workingcommitctx(self, status,
2555 cctx = context.workingcommitctx(self, status,
2507 text, user, date, extra)
2556 text, user, date, extra)
2508
2557
2509 # internal config: ui.allowemptycommit
2558 # internal config: ui.allowemptycommit
2510 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2559 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2511 or extra.get('close') or merge or cctx.files()
2560 or extra.get('close') or merge or cctx.files()
2512 or self.ui.configbool('ui', 'allowemptycommit'))
2561 or self.ui.configbool('ui', 'allowemptycommit'))
2513 if not allowemptycommit:
2562 if not allowemptycommit:
2514 return None
2563 return None
2515
2564
2516 if merge and cctx.deleted():
2565 if merge and cctx.deleted():
2517 raise error.Abort(_("cannot commit merge with missing files"))
2566 raise error.Abort(_("cannot commit merge with missing files"))
2518
2567
2519 ms = mergemod.mergestate.read(self)
2568 ms = mergemod.mergestate.read(self)
2520 mergeutil.checkunresolved(ms)
2569 mergeutil.checkunresolved(ms)
2521
2570
2522 if editor:
2571 if editor:
2523 cctx._text = editor(self, cctx, subs)
2572 cctx._text = editor(self, cctx, subs)
2524 edited = (text != cctx._text)
2573 edited = (text != cctx._text)
2525
2574
2526 # Save commit message in case this transaction gets rolled back
2575 # Save commit message in case this transaction gets rolled back
2527 # (e.g. by a pretxncommit hook). Leave the content alone on
2576 # (e.g. by a pretxncommit hook). Leave the content alone on
2528 # the assumption that the user will use the same editor again.
2577 # the assumption that the user will use the same editor again.
2529 msgfn = self.savecommitmessage(cctx._text)
2578 msgfn = self.savecommitmessage(cctx._text)
2530
2579
2531 # commit subs and write new state
2580 # commit subs and write new state
2532 if subs:
2581 if subs:
2533 uipathfn = scmutil.getuipathfn(self)
2582 uipathfn = scmutil.getuipathfn(self)
2534 for s in sorted(commitsubs):
2583 for s in sorted(commitsubs):
2535 sub = wctx.sub(s)
2584 sub = wctx.sub(s)
2536 self.ui.status(_('committing subrepository %s\n') %
2585 self.ui.status(_('committing subrepository %s\n') %
2537 uipathfn(subrepoutil.subrelpath(sub)))
2586 uipathfn(subrepoutil.subrelpath(sub)))
2538 sr = sub.commit(cctx._text, user, date)
2587 sr = sub.commit(cctx._text, user, date)
2539 newstate[s] = (newstate[s][0], sr)
2588 newstate[s] = (newstate[s][0], sr)
2540 subrepoutil.writestate(self, newstate)
2589 subrepoutil.writestate(self, newstate)
2541
2590
2542 p1, p2 = self.dirstate.parents()
2591 p1, p2 = self.dirstate.parents()
2543 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2592 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2544 try:
2593 try:
2545 self.hook("precommit", throw=True, parent1=hookp1,
2594 self.hook("precommit", throw=True, parent1=hookp1,
2546 parent2=hookp2)
2595 parent2=hookp2)
2547 with self.transaction('commit'):
2596 with self.transaction('commit'):
2548 ret = self.commitctx(cctx, True)
2597 ret = self.commitctx(cctx, True)
2549 # update bookmarks, dirstate and mergestate
2598 # update bookmarks, dirstate and mergestate
2550 bookmarks.update(self, [p1, p2], ret)
2599 bookmarks.update(self, [p1, p2], ret)
2551 cctx.markcommitted(ret)
2600 cctx.markcommitted(ret)
2552 ms.reset()
2601 ms.reset()
2553 except: # re-raises
2602 except: # re-raises
2554 if edited:
2603 if edited:
2555 self.ui.write(
2604 self.ui.write(
2556 _('note: commit message saved in %s\n') % msgfn)
2605 _('note: commit message saved in %s\n') % msgfn)
2557 raise
2606 raise
2558
2607
2559 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2608 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2560 # hack for commands that use a temporary commit (e.g. histedit):
2609 # hack for commands that use a temporary commit (e.g. histedit):
2561 # the temporary commit may have been stripped before the hook runs
2610 # the temporary commit may have been stripped before the hook runs
2562 if self.changelog.hasnode(ret):
2611 if self.changelog.hasnode(ret):
2563 self.hook("commit", node=node, parent1=parent1,
2612 self.hook("commit", node=node, parent1=parent1,
2564 parent2=parent2)
2613 parent2=parent2)
2565 self._afterlock(commithook)
2614 self._afterlock(commithook)
2566 return ret
2615 return ret
2567
2616
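# Illustrative sketch (not part of localrepo.py): driving the commit() API
# above from a script. The repository path, pattern and commit message are
# assumptions chosen for the example.
def _example_commit(ui, path=b'.'):
    from mercurial import hg, match as matchmod
    repo = hg.repository(ui, path)
    # restrict the commit to files matching a pattern, mirroring the
    # "match can be used to filter the committed files" behaviour
    m = matchmod.match(repo.root, repo.getcwd(), [b'glob:**.py'])
    return repo.commit(text=b'example commit',
                       user=b'test <test@example.org>', match=m)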
2568 @unfilteredmethod
2617 @unfilteredmethod
2569 def commitctx(self, ctx, error=False):
2618 def commitctx(self, ctx, error=False):
2570 """Add a new revision to current repository.
2619 """Add a new revision to current repository.
2571 Revision information is passed via the context argument.
2620 Revision information is passed via the context argument.
2572
2621
2573 ctx.files() should list all files involved in this commit, i.e.
2622 ctx.files() should list all files involved in this commit, i.e.
2574 modified/added/removed files. On merge, it may be wider than the
2623 modified/added/removed files. On merge, it may be wider than the
2575 ctx.files() to be committed, since any file nodes derived directly
2624 ctx.files() to be committed, since any file nodes derived directly
2576 from p1 or p2 are excluded from the committed ctx.files().
2625 from p1 or p2 are excluded from the committed ctx.files().
2577 """
2626 """
2578
2627
2579 p1, p2 = ctx.p1(), ctx.p2()
2628 p1, p2 = ctx.p1(), ctx.p2()
2580 user = ctx.user()
2629 user = ctx.user()
2581
2630
2582 writecopiesto = self.ui.config('experimental', 'copies.write-to')
2631 writecopiesto = self.ui.config('experimental', 'copies.write-to')
2583 writefilecopymeta = writecopiesto != 'changeset-only'
2632 writefilecopymeta = writecopiesto != 'changeset-only'
2584 p1copies, p2copies = None, None
2633 p1copies, p2copies = None, None
2585 if writecopiesto in ('changeset-only', 'compatibility'):
2634 if writecopiesto in ('changeset-only', 'compatibility'):
2586 p1copies = ctx.p1copies()
2635 p1copies = ctx.p1copies()
2587 p2copies = ctx.p2copies()
2636 p2copies = ctx.p2copies()
2588 with self.lock(), self.transaction("commit") as tr:
2637 with self.lock(), self.transaction("commit") as tr:
2589 trp = weakref.proxy(tr)
2638 trp = weakref.proxy(tr)
2590
2639
2591 if ctx.manifestnode():
2640 if ctx.manifestnode():
2592 # reuse an existing manifest revision
2641 # reuse an existing manifest revision
2593 self.ui.debug('reusing known manifest\n')
2642 self.ui.debug('reusing known manifest\n')
2594 mn = ctx.manifestnode()
2643 mn = ctx.manifestnode()
2595 files = ctx.files()
2644 files = ctx.files()
2596 elif ctx.files():
2645 elif ctx.files():
2597 m1ctx = p1.manifestctx()
2646 m1ctx = p1.manifestctx()
2598 m2ctx = p2.manifestctx()
2647 m2ctx = p2.manifestctx()
2599 mctx = m1ctx.copy()
2648 mctx = m1ctx.copy()
2600
2649
2601 m = mctx.read()
2650 m = mctx.read()
2602 m1 = m1ctx.read()
2651 m1 = m1ctx.read()
2603 m2 = m2ctx.read()
2652 m2 = m2ctx.read()
2604
2653
2605 # check in files
2654 # check in files
2606 added = []
2655 added = []
2607 changed = []
2656 changed = []
2608 removed = list(ctx.removed())
2657 removed = list(ctx.removed())
2609 linkrev = len(self)
2658 linkrev = len(self)
2610 self.ui.note(_("committing files:\n"))
2659 self.ui.note(_("committing files:\n"))
2611 uipathfn = scmutil.getuipathfn(self)
2660 uipathfn = scmutil.getuipathfn(self)
2612 for f in sorted(ctx.modified() + ctx.added()):
2661 for f in sorted(ctx.modified() + ctx.added()):
2613 self.ui.note(uipathfn(f) + "\n")
2662 self.ui.note(uipathfn(f) + "\n")
2614 try:
2663 try:
2615 fctx = ctx[f]
2664 fctx = ctx[f]
2616 if fctx is None:
2665 if fctx is None:
2617 removed.append(f)
2666 removed.append(f)
2618 else:
2667 else:
2619 added.append(f)
2668 added.append(f)
2620 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2669 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2621 trp, changed,
2670 trp, changed,
2622 writefilecopymeta)
2671 writefilecopymeta)
2623 m.setflag(f, fctx.flags())
2672 m.setflag(f, fctx.flags())
2624 except OSError:
2673 except OSError:
2625 self.ui.warn(_("trouble committing %s!\n") %
2674 self.ui.warn(_("trouble committing %s!\n") %
2626 uipathfn(f))
2675 uipathfn(f))
2627 raise
2676 raise
2628 except IOError as inst:
2677 except IOError as inst:
2629 errcode = getattr(inst, 'errno', errno.ENOENT)
2678 errcode = getattr(inst, 'errno', errno.ENOENT)
2630 if error or errcode and errcode != errno.ENOENT:
2679 if error or errcode and errcode != errno.ENOENT:
2631 self.ui.warn(_("trouble committing %s!\n") %
2680 self.ui.warn(_("trouble committing %s!\n") %
2632 uipathfn(f))
2681 uipathfn(f))
2633 raise
2682 raise
2634
2683
2635 # update manifest
2684 # update manifest
2636 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2685 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2637 drop = [f for f in removed if f in m]
2686 drop = [f for f in removed if f in m]
2638 for f in drop:
2687 for f in drop:
2639 del m[f]
2688 del m[f]
2640 files = changed + removed
2689 files = changed + removed
2641 md = None
2690 md = None
2642 if not files:
2691 if not files:
2643 # if no "files" actually changed in terms of the changelog,
2692 # if no "files" actually changed in terms of the changelog,
2644 # try hard to detect an unmodified manifest entry so that the
2693 # try hard to detect an unmodified manifest entry so that the
2645 # exact same commit can be reproduced later by convert.
2694 # exact same commit can be reproduced later by convert.
2646 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2695 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2647 if not files and md:
2696 if not files and md:
2648 self.ui.debug('not reusing manifest (no file change in '
2697 self.ui.debug('not reusing manifest (no file change in '
2649 'changelog, but manifest differs)\n')
2698 'changelog, but manifest differs)\n')
2650 if files or md:
2699 if files or md:
2651 self.ui.note(_("committing manifest\n"))
2700 self.ui.note(_("committing manifest\n"))
2652 # we're using narrowmatch here since it's already applied at
2701 # we're using narrowmatch here since it's already applied at
2653 # other stages (such as dirstate.walk), so we're already
2702 # other stages (such as dirstate.walk), so we're already
2654 # ignoring things outside of narrowspec in most cases. The
2703 # ignoring things outside of narrowspec in most cases. The
2655 # one case where we might have files outside the narrowspec
2704 # one case where we might have files outside the narrowspec
2656 # at this point is merges, and we already error out in the
2705 # at this point is merges, and we already error out in the
2657 # case where the merge has files outside of the narrowspec,
2706 # case where the merge has files outside of the narrowspec,
2658 # so this is safe.
2707 # so this is safe.
2659 mn = mctx.write(trp, linkrev,
2708 mn = mctx.write(trp, linkrev,
2660 p1.manifestnode(), p2.manifestnode(),
2709 p1.manifestnode(), p2.manifestnode(),
2661 added, drop, match=self.narrowmatch())
2710 added, drop, match=self.narrowmatch())
2662 else:
2711 else:
2663 self.ui.debug('reusing manifest from p1 (listed files '
2712 self.ui.debug('reusing manifest from p1 (listed files '
2664 'actually unchanged)\n')
2713 'actually unchanged)\n')
2665 mn = p1.manifestnode()
2714 mn = p1.manifestnode()
2666 else:
2715 else:
2667 self.ui.debug('reusing manifest from p1 (no file change)\n')
2716 self.ui.debug('reusing manifest from p1 (no file change)\n')
2668 mn = p1.manifestnode()
2717 mn = p1.manifestnode()
2669 files = []
2718 files = []
2670
2719
2671 # update changelog
2720 # update changelog
2672 self.ui.note(_("committing changelog\n"))
2721 self.ui.note(_("committing changelog\n"))
2673 self.changelog.delayupdate(tr)
2722 self.changelog.delayupdate(tr)
2674 n = self.changelog.add(mn, files, ctx.description(),
2723 n = self.changelog.add(mn, files, ctx.description(),
2675 trp, p1.node(), p2.node(),
2724 trp, p1.node(), p2.node(),
2676 user, ctx.date(), ctx.extra().copy(),
2725 user, ctx.date(), ctx.extra().copy(),
2677 p1copies, p2copies)
2726 p1copies, p2copies)
2678 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2727 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2679 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2728 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2680 parent2=xp2)
2729 parent2=xp2)
2681 # set the new commit in its proper phase
2730 # set the new commit in its proper phase
2682 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2731 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2683 if targetphase:
2732 if targetphase:
2684 # retracting the phase boundary does not alter parent changesets.
2733 # retracting the phase boundary does not alter parent changesets.
2685 # if a parent has a higher phase, the resulting phase will
2734 # if a parent has a higher phase, the resulting phase will
2686 # be compliant anyway
2735 # be compliant anyway
2687 #
2736 #
2688 # if minimal phase was 0 we don't need to retract anything
2737 # if minimal phase was 0 we don't need to retract anything
2689 phases.registernew(self, tr, targetphase, [n])
2738 phases.registernew(self, tr, targetphase, [n])
2690 return n
2739 return n
2691
2740
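# Illustrative sketch (not part of localrepo.py): creating an in-memory
# changeset and committing it through commitctx(). The file name and content
# are hypothetical.
def _example_commitctx(repo):
    from mercurial import context
    def filectxfn(repo, memctx, path):
        # return the content of each file listed in the new changeset
        return context.memfilectx(repo, memctx, path, b'example content\n')
    mctx = context.memctx(repo, (repo[b'.'].node(), None),
                          b'example in-memory commit',
                          [b'example.txt'], filectxfn,
                          user=b'test <test@example.org>')
    return repo.commitctx(mctx)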
2692 @unfilteredmethod
2741 @unfilteredmethod
2693 def destroying(self):
2742 def destroying(self):
2694 '''Inform the repository that nodes are about to be destroyed.
2743 '''Inform the repository that nodes are about to be destroyed.
2695 Intended for use by strip and rollback, so there's a common
2744 Intended for use by strip and rollback, so there's a common
2696 place for anything that has to be done before destroying history.
2745 place for anything that has to be done before destroying history.
2697
2746
2698 This is mostly useful for saving state that is in memory and waiting
2747 This is mostly useful for saving state that is in memory and waiting
2699 to be flushed when the current lock is released. Because a call to
2748 to be flushed when the current lock is released. Because a call to
2700 destroyed is imminent, the repo will be invalidated causing those
2749 destroyed is imminent, the repo will be invalidated causing those
2701 changes to stay in memory (waiting for the next unlock), or vanish
2750 changes to stay in memory (waiting for the next unlock), or vanish
2702 completely.
2751 completely.
2703 '''
2752 '''
2704 # When using the same lock to commit and strip, the phasecache is left
2753 # When using the same lock to commit and strip, the phasecache is left
2705 # dirty after committing. Then when we strip, the repo is invalidated,
2754 # dirty after committing. Then when we strip, the repo is invalidated,
2706 # causing those changes to disappear.
2755 # causing those changes to disappear.
2707 if '_phasecache' in vars(self):
2756 if '_phasecache' in vars(self):
2708 self._phasecache.write()
2757 self._phasecache.write()
2709
2758
2710 @unfilteredmethod
2759 @unfilteredmethod
2711 def destroyed(self):
2760 def destroyed(self):
2712 '''Inform the repository that nodes have been destroyed.
2761 '''Inform the repository that nodes have been destroyed.
2713 Intended for use by strip and rollback, so there's a common
2762 Intended for use by strip and rollback, so there's a common
2714 place for anything that has to be done after destroying history.
2763 place for anything that has to be done after destroying history.
2715 '''
2764 '''
2716 # When one tries to:
2765 # When one tries to:
2717 # 1) destroy nodes thus calling this method (e.g. strip)
2766 # 1) destroy nodes thus calling this method (e.g. strip)
2718 # 2) use phasecache somewhere (e.g. commit)
2767 # 2) use phasecache somewhere (e.g. commit)
2719 #
2768 #
2720 # then 2) will fail because the phasecache contains nodes that were
2769 # then 2) will fail because the phasecache contains nodes that were
2721 # removed. We can either remove phasecache from the filecache,
2770 # removed. We can either remove phasecache from the filecache,
2722 # causing it to reload next time it is accessed, or simply filter
2771 # causing it to reload next time it is accessed, or simply filter
2723 # the removed nodes now and write the updated cache.
2772 # the removed nodes now and write the updated cache.
2724 self._phasecache.filterunknown(self)
2773 self._phasecache.filterunknown(self)
2725 self._phasecache.write()
2774 self._phasecache.write()
2726
2775
2727 # refresh all repository caches
2776 # refresh all repository caches
2728 self.updatecaches()
2777 self.updatecaches()
2729
2778
2730 # Ensure the persistent tag cache is updated. Doing it now
2779 # Ensure the persistent tag cache is updated. Doing it now
2731 # means that the tag cache only has to worry about destroyed
2780 # means that the tag cache only has to worry about destroyed
2732 # heads immediately after a strip/rollback. That in turn
2781 # heads immediately after a strip/rollback. That in turn
2733 # guarantees that "cachetip == currenttip" (comparing both rev
2782 # guarantees that "cachetip == currenttip" (comparing both rev
2734 # and node) always means no nodes have been added or destroyed.
2783 # and node) always means no nodes have been added or destroyed.
2735
2784
2736 # XXX this is suboptimal when qrefresh'ing: we strip the current
2785 # XXX this is suboptimal when qrefresh'ing: we strip the current
2737 # head, refresh the tag cache, then immediately add a new head.
2786 # head, refresh the tag cache, then immediately add a new head.
2738 # But I think doing it this way is necessary for the "instant
2787 # But I think doing it this way is necessary for the "instant
2739 # tag cache retrieval" case to work.
2788 # tag cache retrieval" case to work.
2740 self.invalidate()
2789 self.invalidate()
2741
2790
2742 def status(self, node1='.', node2=None, match=None,
2791 def status(self, node1='.', node2=None, match=None,
2743 ignored=False, clean=False, unknown=False,
2792 ignored=False, clean=False, unknown=False,
2744 listsubrepos=False):
2793 listsubrepos=False):
2745 '''a convenience method that calls node1.status(node2)'''
2794 '''a convenience method that calls node1.status(node2)'''
2746 return self[node1].status(node2, match, ignored, clean, unknown,
2795 return self[node1].status(node2, match, ignored, clean, unknown,
2747 listsubrepos)
2796 listsubrepos)
2748
2797
2749 def addpostdsstatus(self, ps):
2798 def addpostdsstatus(self, ps):
2750 """Add a callback to run within the wlock, at the point at which status
2799 """Add a callback to run within the wlock, at the point at which status
2751 fixups happen.
2800 fixups happen.
2752
2801
2753 On status completion, callback(wctx, status) will be called with the
2802 On status completion, callback(wctx, status) will be called with the
2754 wlock held, unless the dirstate has changed from underneath or the wlock
2803 wlock held, unless the dirstate has changed from underneath or the wlock
2755 couldn't be grabbed.
2804 couldn't be grabbed.
2756
2805
2757 Callbacks should not capture and use a cached copy of the dirstate --
2806 Callbacks should not capture and use a cached copy of the dirstate --
2758 it might change in the meanwhile. Instead, they should access the
2807 it might change in the meanwhile. Instead, they should access the
2759 dirstate via wctx.repo().dirstate.
2808 dirstate via wctx.repo().dirstate.
2760
2809
2761 This list is emptied out after each status run -- extensions should
2810 This list is emptied out after each status run -- extensions should
2762 make sure it adds to this list each time dirstate.status is called.
2811 make sure it adds to this list each time dirstate.status is called.
2763 Extensions should also make sure they don't call this for statuses
2812 Extensions should also make sure they don't call this for statuses
2764 that don't involve the dirstate.
2813 that don't involve the dirstate.
2765 """
2814 """
2766
2815
2767 # The list is located here for uniqueness reasons -- it is actually
2816 # The list is located here for uniqueness reasons -- it is actually
2768 # managed by the workingctx, but that isn't unique per-repo.
2817 # managed by the workingctx, but that isn't unique per-repo.
2769 self._postdsstatus.append(ps)
2818 self._postdsstatus.append(ps)
2770
2819
2771 def postdsstatus(self):
2820 def postdsstatus(self):
2772 """Used by workingctx to get the list of post-dirstate-status hooks."""
2821 """Used by workingctx to get the list of post-dirstate-status hooks."""
2773 return self._postdsstatus
2822 return self._postdsstatus
2774
2823
2775 def clearpostdsstatus(self):
2824 def clearpostdsstatus(self):
2776 """Used by workingctx to clear post-dirstate-status hooks."""
2825 """Used by workingctx to clear post-dirstate-status hooks."""
2777 del self._postdsstatus[:]
2826 del self._postdsstatus[:]
2778
2827
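# Illustrative sketch (not part of localrepo.py): how an extension might use
# addpostdsstatus() above. The callback name is hypothetical; note that it
# accesses the dirstate through wctx.repo(), never a cached copy.
def _example_register_postdsstatus(repo):
    def callback(wctx, status):
        wctx.repo().ui.debug(b'post-status: %d modified, %d added\n'
                             % (len(status.modified), len(status.added)))
    repo.addpostdsstatus(callback)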
2779 def heads(self, start=None):
2828 def heads(self, start=None):
2780 if start is None:
2829 if start is None:
2781 cl = self.changelog
2830 cl = self.changelog
2782 headrevs = reversed(cl.headrevs())
2831 headrevs = reversed(cl.headrevs())
2783 return [cl.node(rev) for rev in headrevs]
2832 return [cl.node(rev) for rev in headrevs]
2784
2833
2785 heads = self.changelog.heads(start)
2834 heads = self.changelog.heads(start)
2786 # sort the output in rev descending order
2835 # sort the output in rev descending order
2787 return sorted(heads, key=self.changelog.rev, reverse=True)
2836 return sorted(heads, key=self.changelog.rev, reverse=True)
2788
2837
2789 def branchheads(self, branch=None, start=None, closed=False):
2838 def branchheads(self, branch=None, start=None, closed=False):
2790 '''return a (possibly filtered) list of heads for the given branch
2839 '''return a (possibly filtered) list of heads for the given branch
2791
2840
2792 Heads are returned in topological order, from newest to oldest.
2841 Heads are returned in topological order, from newest to oldest.
2793 If branch is None, use the dirstate branch.
2842 If branch is None, use the dirstate branch.
2794 If start is not None, return only heads reachable from start.
2843 If start is not None, return only heads reachable from start.
2795 If closed is True, return heads that are marked as closed as well.
2844 If closed is True, return heads that are marked as closed as well.
2796 '''
2845 '''
2797 if branch is None:
2846 if branch is None:
2798 branch = self[None].branch()
2847 branch = self[None].branch()
2799 branches = self.branchmap()
2848 branches = self.branchmap()
2800 if not branches.hasbranch(branch):
2849 if not branches.hasbranch(branch):
2801 return []
2850 return []
2802 # the cache returns heads ordered lowest to highest
2851 # the cache returns heads ordered lowest to highest
2803 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2852 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2804 if start is not None:
2853 if start is not None:
2805 # filter out the heads that cannot be reached from startrev
2854 # filter out the heads that cannot be reached from startrev
2806 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2855 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2807 bheads = [h for h in bheads if h in fbheads]
2856 bheads = [h for h in bheads if h in fbheads]
2808 return bheads
2857 return bheads
2809
2858
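# Illustrative sketch (not part of localrepo.py): the heads()/branchheads()
# APIs above in use. The branch name b'default' is an assumption.
def _example_heads(repo):
    allheads = repo.heads()                              # all heads, newest first
    defaultheads = repo.branchheads(b'default', closed=True)
    repo.ui.write(b'%d heads, %d on default\n'
                  % (len(allheads), len(defaultheads)))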
2810 def branches(self, nodes):
2859 def branches(self, nodes):
2811 if not nodes:
2860 if not nodes:
2812 nodes = [self.changelog.tip()]
2861 nodes = [self.changelog.tip()]
2813 b = []
2862 b = []
2814 for n in nodes:
2863 for n in nodes:
2815 t = n
2864 t = n
2816 while True:
2865 while True:
2817 p = self.changelog.parents(n)
2866 p = self.changelog.parents(n)
2818 if p[1] != nullid or p[0] == nullid:
2867 if p[1] != nullid or p[0] == nullid:
2819 b.append((t, n, p[0], p[1]))
2868 b.append((t, n, p[0], p[1]))
2820 break
2869 break
2821 n = p[0]
2870 n = p[0]
2822 return b
2871 return b
2823
2872
2824 def between(self, pairs):
2873 def between(self, pairs):
2825 r = []
2874 r = []
2826
2875
2827 for top, bottom in pairs:
2876 for top, bottom in pairs:
2828 n, l, i = top, [], 0
2877 n, l, i = top, [], 0
2829 f = 1
2878 f = 1
2830
2879
2831 while n != bottom and n != nullid:
2880 while n != bottom and n != nullid:
2832 p = self.changelog.parents(n)[0]
2881 p = self.changelog.parents(n)[0]
2833 if i == f:
2882 if i == f:
2834 l.append(n)
2883 l.append(n)
2835 f = f * 2
2884 f = f * 2
2836 n = p
2885 n = p
2837 i += 1
2886 i += 1
2838
2887
2839 r.append(l)
2888 r.append(l)
2840
2889
2841 return r
2890 return r
2842
2891
2843 def checkpush(self, pushop):
2892 def checkpush(self, pushop):
2844 """Extensions can override this function if additional checks have
2893 """Extensions can override this function if additional checks have
2845 to be performed before pushing, or call it if they override push
2894 to be performed before pushing, or call it if they override push
2846 command.
2895 command.
2847 """
2896 """
2848
2897
2849 @unfilteredpropertycache
2898 @unfilteredpropertycache
2850 def prepushoutgoinghooks(self):
2899 def prepushoutgoinghooks(self):
2851 """Return util.hooks consists of a pushop with repo, remote, outgoing
2900 """Return util.hooks consists of a pushop with repo, remote, outgoing
2852 methods, which are called before pushing changesets.
2901 methods, which are called before pushing changesets.
2853 """
2902 """
2854 return util.hooks()
2903 return util.hooks()
2855
2904
2856 def pushkey(self, namespace, key, old, new):
2905 def pushkey(self, namespace, key, old, new):
2857 try:
2906 try:
2858 tr = self.currenttransaction()
2907 tr = self.currenttransaction()
2859 hookargs = {}
2908 hookargs = {}
2860 if tr is not None:
2909 if tr is not None:
2861 hookargs.update(tr.hookargs)
2910 hookargs.update(tr.hookargs)
2862 hookargs = pycompat.strkwargs(hookargs)
2911 hookargs = pycompat.strkwargs(hookargs)
2863 hookargs[r'namespace'] = namespace
2912 hookargs[r'namespace'] = namespace
2864 hookargs[r'key'] = key
2913 hookargs[r'key'] = key
2865 hookargs[r'old'] = old
2914 hookargs[r'old'] = old
2866 hookargs[r'new'] = new
2915 hookargs[r'new'] = new
2867 self.hook('prepushkey', throw=True, **hookargs)
2916 self.hook('prepushkey', throw=True, **hookargs)
2868 except error.HookAbort as exc:
2917 except error.HookAbort as exc:
2869 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2918 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2870 if exc.hint:
2919 if exc.hint:
2871 self.ui.write_err(_("(%s)\n") % exc.hint)
2920 self.ui.write_err(_("(%s)\n") % exc.hint)
2872 return False
2921 return False
2873 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2922 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2874 ret = pushkey.push(self, namespace, key, old, new)
2923 ret = pushkey.push(self, namespace, key, old, new)
2875 def runhook():
2924 def runhook():
2876 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2925 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2877 ret=ret)
2926 ret=ret)
2878 self._afterlock(runhook)
2927 self._afterlock(runhook)
2879 return ret
2928 return ret
2880
2929
2881 def listkeys(self, namespace):
2930 def listkeys(self, namespace):
2882 self.hook('prelistkeys', throw=True, namespace=namespace)
2931 self.hook('prelistkeys', throw=True, namespace=namespace)
2883 self.ui.debug('listing keys for "%s"\n' % namespace)
2932 self.ui.debug('listing keys for "%s"\n' % namespace)
2884 values = pushkey.list(self, namespace)
2933 values = pushkey.list(self, namespace)
2885 self.hook('listkeys', namespace=namespace, values=values)
2934 self.hook('listkeys', namespace=namespace, values=values)
2886 return values
2935 return values
2887
2936
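# Illustrative sketch (not part of localrepo.py): the pushkey()/listkeys()
# APIs above used with the 'bookmarks' namespace. The bookmark name is
# hypothetical; old and new values are hex nodes (empty string when absent).
def _example_move_bookmark(repo, name=b'book-A'):
    from mercurial.node import hex
    marks = repo.listkeys(b'bookmarks')      # {bookmark name: hex node}
    old = marks.get(name, b'')
    new = hex(repo[b'tip'].node())
    return repo.pushkey(b'bookmarks', name, old, new)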
2888 def debugwireargs(self, one, two, three=None, four=None, five=None):
2937 def debugwireargs(self, one, two, three=None, four=None, five=None):
2889 '''used to test argument passing over the wire'''
2938 '''used to test argument passing over the wire'''
2890 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2939 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2891 pycompat.bytestr(four),
2940 pycompat.bytestr(four),
2892 pycompat.bytestr(five))
2941 pycompat.bytestr(five))
2893
2942
2894 def savecommitmessage(self, text):
2943 def savecommitmessage(self, text):
2895 fp = self.vfs('last-message.txt', 'wb')
2944 fp = self.vfs('last-message.txt', 'wb')
2896 try:
2945 try:
2897 fp.write(text)
2946 fp.write(text)
2898 finally:
2947 finally:
2899 fp.close()
2948 fp.close()
2900 return self.pathto(fp.name[len(self.root) + 1:])
2949 return self.pathto(fp.name[len(self.root) + 1:])
2901
2950
2902 # used to avoid circular references so destructors work
2951 # used to avoid circular references so destructors work
2903 def aftertrans(files):
2952 def aftertrans(files):
2904 renamefiles = [tuple(t) for t in files]
2953 renamefiles = [tuple(t) for t in files]
2905 def a():
2954 def a():
2906 for vfs, src, dest in renamefiles:
2955 for vfs, src, dest in renamefiles:
2907 # if src and dest refer to the same file, vfs.rename is a no-op,
2956 # if src and dest refer to the same file, vfs.rename is a no-op,
2908 # leaving both src and dest on disk. delete dest to make sure
2957 # leaving both src and dest on disk. delete dest to make sure
2909 # the rename couldn't be such a no-op.
2958 # the rename couldn't be such a no-op.
2910 vfs.tryunlink(dest)
2959 vfs.tryunlink(dest)
2911 try:
2960 try:
2912 vfs.rename(src, dest)
2961 vfs.rename(src, dest)
2913 except OSError: # journal file does not yet exist
2962 except OSError: # journal file does not yet exist
2914 pass
2963 pass
2915 return a
2964 return a
2916
2965
2917 def undoname(fn):
2966 def undoname(fn):
2918 base, name = os.path.split(fn)
2967 base, name = os.path.split(fn)
2919 assert name.startswith('journal')
2968 assert name.startswith('journal')
2920 return os.path.join(base, name.replace('journal', 'undo', 1))
2969 return os.path.join(base, name.replace('journal', 'undo', 1))
2921
2970
2922 def instance(ui, path, create, intents=None, createopts=None):
2971 def instance(ui, path, create, intents=None, createopts=None):
2923 localpath = util.urllocalpath(path)
2972 localpath = util.urllocalpath(path)
2924 if create:
2973 if create:
2925 createrepository(ui, localpath, createopts=createopts)
2974 createrepository(ui, localpath, createopts=createopts)
2926
2975
2927 return makelocalrepository(ui, localpath, intents=intents)
2976 return makelocalrepository(ui, localpath, intents=intents)
2928
2977
2929 def islocal(path):
2978 def islocal(path):
2930 return True
2979 return True
2931
2980
2932 def defaultcreateopts(ui, createopts=None):
2981 def defaultcreateopts(ui, createopts=None):
2933 """Populate the default creation options for a repository.
2982 """Populate the default creation options for a repository.
2934
2983
2935 A dictionary of explicitly requested creation options can be passed
2984 A dictionary of explicitly requested creation options can be passed
2936 in. Missing keys will be populated.
2985 in. Missing keys will be populated.
2937 """
2986 """
2938 createopts = dict(createopts or {})
2987 createopts = dict(createopts or {})
2939
2988
2940 if 'backend' not in createopts:
2989 if 'backend' not in createopts:
2941 # experimental config: storage.new-repo-backend
2990 # experimental config: storage.new-repo-backend
2942 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2991 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2943
2992
2944 return createopts
2993 return createopts
2945
2994
2946 def newreporequirements(ui, createopts):
2995 def newreporequirements(ui, createopts):
2947 """Determine the set of requirements for a new local repository.
2996 """Determine the set of requirements for a new local repository.
2948
2997
2949 Extensions can wrap this function to specify custom requirements for
2998 Extensions can wrap this function to specify custom requirements for
2950 new repositories.
2999 new repositories.
2951 """
3000 """
2952 # If the repo is being created from a shared repository, we copy
3001 # If the repo is being created from a shared repository, we copy
2953 # its requirements.
3002 # its requirements.
2954 if 'sharedrepo' in createopts:
3003 if 'sharedrepo' in createopts:
2955 requirements = set(createopts['sharedrepo'].requirements)
3004 requirements = set(createopts['sharedrepo'].requirements)
2956 if createopts.get('sharedrelative'):
3005 if createopts.get('sharedrelative'):
2957 requirements.add('relshared')
3006 requirements.add('relshared')
2958 else:
3007 else:
2959 requirements.add('shared')
3008 requirements.add('shared')
2960
3009
2961 return requirements
3010 return requirements
2962
3011
2963 if 'backend' not in createopts:
3012 if 'backend' not in createopts:
2964 raise error.ProgrammingError('backend key not present in createopts; '
3013 raise error.ProgrammingError('backend key not present in createopts; '
2965 'was defaultcreateopts() called?')
3014 'was defaultcreateopts() called?')
2966
3015
2967 if createopts['backend'] != 'revlogv1':
3016 if createopts['backend'] != 'revlogv1':
2968 raise error.Abort(_('unable to determine repository requirements for '
3017 raise error.Abort(_('unable to determine repository requirements for '
2969 'storage backend: %s') % createopts['backend'])
3018 'storage backend: %s') % createopts['backend'])
2970
3019
2971 requirements = {'revlogv1'}
3020 requirements = {'revlogv1'}
2972 if ui.configbool('format', 'usestore'):
3021 if ui.configbool('format', 'usestore'):
2973 requirements.add('store')
3022 requirements.add('store')
2974 if ui.configbool('format', 'usefncache'):
3023 if ui.configbool('format', 'usefncache'):
2975 requirements.add('fncache')
3024 requirements.add('fncache')
2976 if ui.configbool('format', 'dotencode'):
3025 if ui.configbool('format', 'dotencode'):
2977 requirements.add('dotencode')
3026 requirements.add('dotencode')
2978
3027
2979 compengine = ui.config('format', 'revlog-compression')
3028 compengine = ui.config('format', 'revlog-compression')
2980 if compengine not in util.compengines:
3029 if compengine not in util.compengines:
2981 raise error.Abort(_('compression engine %s defined by '
3030 raise error.Abort(_('compression engine %s defined by '
2982 'format.revlog-compression not available') %
3031 'format.revlog-compression not available') %
2983 compengine,
3032 compengine,
2984 hint=_('run "hg debuginstall" to list available '
3033 hint=_('run "hg debuginstall" to list available '
2985 'compression engines'))
3034 'compression engines'))
2986
3035
2987 # zlib is the historical default and doesn't need an explicit requirement.
3036 # zlib is the historical default and doesn't need an explicit requirement.
2988 elif compengine == 'zstd':
3037 elif compengine == 'zstd':
2989 requirements.add('revlog-compression-zstd')
3038 requirements.add('revlog-compression-zstd')
2990 elif compengine != 'zlib':
3039 elif compengine != 'zlib':
2991 requirements.add('exp-compression-%s' % compengine)
3040 requirements.add('exp-compression-%s' % compengine)
2992
3041
2993 if scmutil.gdinitconfig(ui):
3042 if scmutil.gdinitconfig(ui):
2994 requirements.add('generaldelta')
3043 requirements.add('generaldelta')
2995 if ui.configbool('format', 'sparse-revlog'):
3044 if ui.configbool('format', 'sparse-revlog'):
2996 requirements.add(SPARSEREVLOG_REQUIREMENT)
3045 requirements.add(SPARSEREVLOG_REQUIREMENT)
2997 if ui.configbool('experimental', 'treemanifest'):
3046 if ui.configbool('experimental', 'treemanifest'):
2998 requirements.add('treemanifest')
3047 requirements.add('treemanifest')
2999
3048
3000 revlogv2 = ui.config('experimental', 'revlogv2')
3049 revlogv2 = ui.config('experimental', 'revlogv2')
3001 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
3050 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
3002 requirements.remove('revlogv1')
3051 requirements.remove('revlogv1')
3003 # generaldelta is implied by revlogv2.
3052 # generaldelta is implied by revlogv2.
3004 requirements.discard('generaldelta')
3053 requirements.discard('generaldelta')
3005 requirements.add(REVLOGV2_REQUIREMENT)
3054 requirements.add(REVLOGV2_REQUIREMENT)
3006 # experimental config: format.internal-phase
3055 # experimental config: format.internal-phase
3007 if ui.configbool('format', 'internal-phase'):
3056 if ui.configbool('format', 'internal-phase'):
3008 requirements.add('internal-phase')
3057 requirements.add('internal-phase')
3009
3058
3010 if createopts.get('narrowfiles'):
3059 if createopts.get('narrowfiles'):
3011 requirements.add(repository.NARROW_REQUIREMENT)
3060 requirements.add(repository.NARROW_REQUIREMENT)
3012
3061
3013 if createopts.get('lfs'):
3062 if createopts.get('lfs'):
3014 requirements.add('lfs')
3063 requirements.add('lfs')
3015
3064
3016 return requirements
3065 return requirements
3017
3066
3018 def filterknowncreateopts(ui, createopts):
3067 def filterknowncreateopts(ui, createopts):
3019 """Filters a dict of repo creation options against options that are known.
3068 """Filters a dict of repo creation options against options that are known.
3020
3069
3021 Receives a dict of repo creation options and returns a dict of those
3070 Receives a dict of repo creation options and returns a dict of those
3022 options that we don't know how to handle.
3071 options that we don't know how to handle.
3023
3072
3024 This function is called as part of repository creation. If the
3073 This function is called as part of repository creation. If the
3025 returned dict contains any items, repository creation will not
3074 returned dict contains any items, repository creation will not
3026 be allowed, as it means there was a request to create a repository
3075 be allowed, as it means there was a request to create a repository
3027 with options not recognized by loaded code.
3076 with options not recognized by loaded code.
3028
3077
3029 Extensions can wrap this function to filter out creation options
3078 Extensions can wrap this function to filter out creation options
3030 they know how to handle.
3079 they know how to handle.
3031 """
3080 """
3032 known = {
3081 known = {
3033 'backend',
3082 'backend',
3034 'lfs',
3083 'lfs',
3035 'narrowfiles',
3084 'narrowfiles',
3036 'sharedrepo',
3085 'sharedrepo',
3037 'sharedrelative',
3086 'sharedrelative',
3038 'shareditems',
3087 'shareditems',
3039 'shallowfilestore',
3088 'shallowfilestore',
3040 }
3089 }
3041
3090
3042 return {k: v for k, v in createopts.items() if k not in known}
3091 return {k: v for k, v in createopts.items() if k not in known}
3043
3092
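# Illustrative sketch (not part of localrepo.py): an extension advertising a
# custom creation option by wrapping the function above. 'myopt' and the
# helper names are hypothetical.
def _example_extsetup(ui):
    from mercurial import extensions, localrepo
    def wrapper(orig, ui, createopts):
        unknown = orig(ui, createopts)
        unknown.pop('myopt', None)   # this extension knows how to handle 'myopt'
        return unknown
    extensions.wrapfunction(localrepo, 'filterknowncreateopts', wrapper)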
3044 def createrepository(ui, path, createopts=None):
3093 def createrepository(ui, path, createopts=None):
3045 """Create a new repository in a vfs.
3094 """Create a new repository in a vfs.
3046
3095
3047 ``path`` path to the new repo's working directory.
3096 ``path`` path to the new repo's working directory.
3048 ``createopts`` options for the new repository.
3097 ``createopts`` options for the new repository.
3049
3098
3050 The following keys for ``createopts`` are recognized:
3099 The following keys for ``createopts`` are recognized:
3051
3100
3052 backend
3101 backend
3053 The storage backend to use.
3102 The storage backend to use.
3054 lfs
3103 lfs
3055 Repository will be created with ``lfs`` requirement. The lfs extension
3104 Repository will be created with ``lfs`` requirement. The lfs extension
3056 will automatically be loaded when the repository is accessed.
3105 will automatically be loaded when the repository is accessed.
3057 narrowfiles
3106 narrowfiles
3058 Set up repository to support narrow file storage.
3107 Set up repository to support narrow file storage.
3059 sharedrepo
3108 sharedrepo
3060 Repository object from which storage should be shared.
3109 Repository object from which storage should be shared.
3061 sharedrelative
3110 sharedrelative
3062 Boolean indicating if the path to the shared repo should be
3111 Boolean indicating if the path to the shared repo should be
3063 stored as relative. By default, the pointer to the "parent" repo
3112 stored as relative. By default, the pointer to the "parent" repo
3064 is stored as an absolute path.
3113 is stored as an absolute path.
3065 shareditems
3114 shareditems
3066 Set of items to share to the new repository (in addition to storage).
3115 Set of items to share to the new repository (in addition to storage).
3067 shallowfilestore
3116 shallowfilestore
3068 Indicates that storage for files should be shallow (not all ancestor
3117 Indicates that storage for files should be shallow (not all ancestor
3069 revisions are known).
3118 revisions are known).
3070 """
3119 """
3071 createopts = defaultcreateopts(ui, createopts=createopts)
3120 createopts = defaultcreateopts(ui, createopts=createopts)
3072
3121
3073 unknownopts = filterknowncreateopts(ui, createopts)
3122 unknownopts = filterknowncreateopts(ui, createopts)
3074
3123
3075 if not isinstance(unknownopts, dict):
3124 if not isinstance(unknownopts, dict):
3076 raise error.ProgrammingError('filterknowncreateopts() did not return '
3125 raise error.ProgrammingError('filterknowncreateopts() did not return '
3077 'a dict')
3126 'a dict')
3078
3127
3079 if unknownopts:
3128 if unknownopts:
3080 raise error.Abort(_('unable to create repository because of unknown '
3129 raise error.Abort(_('unable to create repository because of unknown '
3081 'creation option: %s') %
3130 'creation option: %s') %
3082 ', '.join(sorted(unknownopts)),
3131 ', '.join(sorted(unknownopts)),
3083 hint=_('is a required extension not loaded?'))
3132 hint=_('is a required extension not loaded?'))
3084
3133
3085 requirements = newreporequirements(ui, createopts=createopts)
3134 requirements = newreporequirements(ui, createopts=createopts)
3086
3135
3087 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3136 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3088
3137
3089 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3138 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3090 if hgvfs.exists():
3139 if hgvfs.exists():
3091 raise error.RepoError(_('repository %s already exists') % path)
3140 raise error.RepoError(_('repository %s already exists') % path)
3092
3141
3093 if 'sharedrepo' in createopts:
3142 if 'sharedrepo' in createopts:
3094 sharedpath = createopts['sharedrepo'].sharedpath
3143 sharedpath = createopts['sharedrepo'].sharedpath
3095
3144
3096 if createopts.get('sharedrelative'):
3145 if createopts.get('sharedrelative'):
3097 try:
3146 try:
3098 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3147 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3099 except (IOError, ValueError) as e:
3148 except (IOError, ValueError) as e:
3100 # ValueError is raised on Windows if the drive letters differ
3149 # ValueError is raised on Windows if the drive letters differ
3101 # on each path.
3150 # on each path.
3102 raise error.Abort(_('cannot calculate relative path'),
3151 raise error.Abort(_('cannot calculate relative path'),
3103 hint=stringutil.forcebytestr(e))
3152 hint=stringutil.forcebytestr(e))
3104
3153
3105 if not wdirvfs.exists():
3154 if not wdirvfs.exists():
3106 wdirvfs.makedirs()
3155 wdirvfs.makedirs()
3107
3156
3108 hgvfs.makedir(notindexed=True)
3157 hgvfs.makedir(notindexed=True)
3109 if 'sharedrepo' not in createopts:
3158 if 'sharedrepo' not in createopts:
3110 hgvfs.mkdir(b'cache')
3159 hgvfs.mkdir(b'cache')
3111 hgvfs.mkdir(b'wcache')
3160 hgvfs.mkdir(b'wcache')
3112
3161
3113 if b'store' in requirements and 'sharedrepo' not in createopts:
3162 if b'store' in requirements and 'sharedrepo' not in createopts:
3114 hgvfs.mkdir(b'store')
3163 hgvfs.mkdir(b'store')
3115
3164
3116 # We create an invalid changelog outside the store so very old
3165 # We create an invalid changelog outside the store so very old
3117 # Mercurial versions (which didn't know about the requirements
3166 # Mercurial versions (which didn't know about the requirements
3118 # file) encounter an error on reading the changelog. This
3167 # file) encounter an error on reading the changelog. This
3119 # effectively locks out old clients and prevents them from
3168 # effectively locks out old clients and prevents them from
3120 # mucking with a repo in an unknown format.
3169 # mucking with a repo in an unknown format.
3121 #
3170 #
3122 # The revlog header has version 2, which won't be recognized by
3171 # The revlog header has version 2, which won't be recognized by
3123 # such old clients.
3172 # such old clients.
3124 hgvfs.append(b'00changelog.i',
3173 hgvfs.append(b'00changelog.i',
3125 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3174 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3126 b'layout')
3175 b'layout')
3127
3176
3128 scmutil.writerequires(hgvfs, requirements)
3177 scmutil.writerequires(hgvfs, requirements)
3129
3178
3130 # Write out file telling readers where to find the shared store.
3179 # Write out file telling readers where to find the shared store.
3131 if 'sharedrepo' in createopts:
3180 if 'sharedrepo' in createopts:
3132 hgvfs.write(b'sharedpath', sharedpath)
3181 hgvfs.write(b'sharedpath', sharedpath)
3133
3182
3134 if createopts.get('shareditems'):
3183 if createopts.get('shareditems'):
3135 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3184 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3136 hgvfs.write(b'shared', shared)
3185 hgvfs.write(b'shared', shared)
3137
3186
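# Illustrative sketch (not part of localrepo.py): creating a repository with
# a couple of the createopts documented above. The target path is an
# assumption for the example.
def _example_create(ui):
    createrepository(ui, b'/tmp/example-repo',
                     createopts={'lfs': True, 'narrowfiles': False})
    return instance(ui, b'/tmp/example-repo', create=False)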
3138 def poisonrepository(repo):
3187 def poisonrepository(repo):
3139 """Poison a repository instance so it can no longer be used."""
3188 """Poison a repository instance so it can no longer be used."""
3140 # Perform any cleanup on the instance.
3189 # Perform any cleanup on the instance.
3141 repo.close()
3190 repo.close()
3142
3191
3143 # Our strategy is to replace the type of the object with one that
3192 # Our strategy is to replace the type of the object with one that
3144 # has all attribute lookups result in error.
3193 # has all attribute lookups result in error.
3145 #
3194 #
3146 # But we have to allow the close() method because some constructors
3195 # But we have to allow the close() method because some constructors
3147 # of repos call close() on repo references.
3196 # of repos call close() on repo references.
3148 class poisonedrepository(object):
3197 class poisonedrepository(object):
3149 def __getattribute__(self, item):
3198 def __getattribute__(self, item):
3150 if item == r'close':
3199 if item == r'close':
3151 return object.__getattribute__(self, item)
3200 return object.__getattribute__(self, item)
3152
3201
3153 raise error.ProgrammingError('repo instances should not be used '
3202 raise error.ProgrammingError('repo instances should not be used '
3154 'after unshare')
3203 'after unshare')
3155
3204
3156 def close(self):
3205 def close(self):
3157 pass
3206 pass
3158
3207
3159 # We may have a repoview, which intercepts __setattr__. So be sure
3208 # We may have a repoview, which intercepts __setattr__. So be sure
3160 # we operate at the lowest level possible.
3209 # we operate at the lowest level possible.
3161 object.__setattr__(repo, r'__class__', poisonedrepository)
3210 object.__setattr__(repo, r'__class__', poisonedrepository)
@@ -1,245 +1,246 b''
1 ================================
1 ================================
2 Test corner case around bookmark
2 Test corner case around bookmark
3 ================================
3 ================================
4
4
5 This test file is meant to gather tests around bookmarks that are specific
5 This test file is meant to gather tests around bookmarks that are specific
6 enough to not find a place elsewhere.
6 enough to not find a place elsewhere.
7
7
8 Test bookmark/changelog race condition
8 Test bookmark/changelog race condition
9 ======================================
9 ======================================
10
10
11 The data from the bookmark file are filtered to only contain bookmarks whose
11 The data from the bookmark file are filtered to only contain bookmarks whose
12 nodes are known to the changelog. If the cache invalidation between these two
12 nodes are known to the changelog. If the cache invalidation between these two
13 bits goes wrong, bookmarks can be dropped.
13 bits goes wrong, bookmarks can be dropped.
14
14
15 global setup
15 global setup
16 ------------
16 ------------
17
17
18 $ cat >> $HGRCPATH << EOF
18 $ cat >> $HGRCPATH << EOF
19 > [ui]
19 > [ui]
20 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
20 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
21 > [server]
21 > [server]
22 > concurrent-push-mode=check-related
22 > concurrent-push-mode=check-related
23 > EOF
23 > EOF
24
24
25 Setup
25 Setup
26 -----
26 -----
27
27
28 initial repository setup
28 initial repository setup
29
29
30 $ hg init bookrace-server
30 $ hg init bookrace-server
31 $ cd bookrace-server
31 $ cd bookrace-server
32 $ echo a > a
32 $ echo a > a
33 $ hg add a
33 $ hg add a
34 $ hg commit -m root
34 $ hg commit -m root
35 $ echo a >> a
35 $ echo a >> a
36 $ hg bookmark book-A
36 $ hg bookmark book-A
37 $ hg commit -m A0
37 $ hg commit -m A0
38 $ hg up 'desc(root)'
38 $ hg up 'desc(root)'
39 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
39 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 (leaving bookmark book-A)
40 (leaving bookmark book-A)
41 $ echo b > b
41 $ echo b > b
42 $ hg add b
42 $ hg add b
43 $ hg bookmark book-B
43 $ hg bookmark book-B
44 $ hg commit -m B0
44 $ hg commit -m B0
45 created new head
45 created new head
46 $ hg up null
46 $ hg up null
47 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
47 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
48 (leaving bookmark book-B)
48 (leaving bookmark book-B)
49 $ hg phase --public --rev 'all()'
49 $ hg phase --public --rev 'all()'
50 $ hg log -G
50 $ hg log -G
51 o changeset: 2:c79985706978
51 o changeset: 2:c79985706978
52 | bookmark: book-B
52 | bookmark: book-B
53 | tag: tip
53 | tag: tip
54 | parent: 0:6569b5a81c7e
54 | parent: 0:6569b5a81c7e
55 | user: test
55 | user: test
56 | date: Thu Jan 01 00:00:00 1970 +0000
56 | date: Thu Jan 01 00:00:00 1970 +0000
57 | summary: B0
57 | summary: B0
58 |
58 |
59 | o changeset: 1:39c28d785860
59 | o changeset: 1:39c28d785860
60 |/ bookmark: book-A
60 |/ bookmark: book-A
61 | user: test
61 | user: test
62 | date: Thu Jan 01 00:00:00 1970 +0000
62 | date: Thu Jan 01 00:00:00 1970 +0000
63 | summary: A0
63 | summary: A0
64 |
64 |
65 o changeset: 0:6569b5a81c7e
65 o changeset: 0:6569b5a81c7e
66 user: test
66 user: test
67 date: Thu Jan 01 00:00:00 1970 +0000
67 date: Thu Jan 01 00:00:00 1970 +0000
68 summary: root
68 summary: root
69
69
70 $ hg book
70 $ hg book
71 book-A 1:39c28d785860
71 book-A 1:39c28d785860
72 book-B 2:c79985706978
72 book-B 2:c79985706978
73 $ cd ..
73 $ cd ..
74
74
75 Add a new changeset on each bookmark in distinct clones
75 Add a new changeset on each bookmark in distinct clones
76
76
77 $ hg clone ssh://user@dummy/bookrace-server client-A
77 $ hg clone ssh://user@dummy/bookrace-server client-A
78 requesting all changes
78 requesting all changes
79 adding changesets
79 adding changesets
80 adding manifests
80 adding manifests
81 adding file changes
81 adding file changes
82 added 3 changesets with 3 changes to 2 files (+1 heads)
82 added 3 changesets with 3 changes to 2 files (+1 heads)
83 new changesets 6569b5a81c7e:c79985706978
83 new changesets 6569b5a81c7e:c79985706978
84 updating to branch default
84 updating to branch default
85 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
85 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
86 $ hg -R client-A update book-A
86 $ hg -R client-A update book-A
87 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
87 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
88 (activating bookmark book-A)
88 (activating bookmark book-A)
89 $ echo a >> client-A/a
89 $ echo a >> client-A/a
90 $ hg -R client-A commit -m A1
90 $ hg -R client-A commit -m A1
91 $ hg clone ssh://user@dummy/bookrace-server client-B
91 $ hg clone ssh://user@dummy/bookrace-server client-B
92 requesting all changes
92 requesting all changes
93 adding changesets
93 adding changesets
94 adding manifests
94 adding manifests
95 adding file changes
95 adding file changes
96 added 3 changesets with 3 changes to 2 files (+1 heads)
96 added 3 changesets with 3 changes to 2 files (+1 heads)
97 new changesets 6569b5a81c7e:c79985706978
97 new changesets 6569b5a81c7e:c79985706978
98 updating to branch default
98 updating to branch default
99 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
99 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 $ hg -R client-B update book-B
100 $ hg -R client-B update book-B
101 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
101 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
102 (activating bookmark book-B)
102 (activating bookmark book-B)
103 $ echo b >> client-B/b
103 $ echo b >> client-B/b
104 $ hg -R client-B commit -m B1
104 $ hg -R client-B commit -m B1
105
105
106 extension to reproduce the race
106 extension to reproduce the race
107 -------------------------------
107 -------------------------------
108
108
109 If two processes are pushing, we want to make sure the following happens:
109 If two processes are pushing, we want to make sure the following happens:
110
110
111 * process A reads the changelog
111 * process A reads the changelog
112 * process B does its full push
112 * process B does its full push
113 * process A reads the bookmarks
113 * process A reads the bookmarks
114 * process A proceeds with the rest of the push
114 * process A proceeds with the rest of the push
115
115
116 We build a server-side extension for this purpose
116 We build a server-side extension for this purpose
117
117
118 $ cat > bookrace.py << EOF
118 $ cat > bookrace.py << EOF
119 > import os
119 > import os
120 > import time
120 > import time
121 > import atexit
121 > import atexit
122 > from mercurial import error, extensions, bookmarks
122 > from mercurial import error, extensions, bookmarks
123 >
123 >
124 > def wait(repo):
124 > def wait(repo):
125 > if not os.path.exists('push-A-started'):
125 > if not os.path.exists('push-A-started'):
126 > assert repo._currentlock(repo._lockref) is None
126 > assert repo._currentlock(repo._lockref) is None
127 > assert repo._currentlock(repo._wlockref) is None
127 > assert repo._currentlock(repo._wlockref) is None
128 > print('setting raced push up')
128 > print('setting raced push up')
129 > with open('push-A-started', 'w'):
129 > with open('push-A-started', 'w'):
130 > pass
130 > pass
131 > clock = 300
131 > clock = 300
132 > while not os.path.exists('push-B-done'):
132 > while not os.path.exists('push-B-done'):
133 > clock -= 1
133 > clock -= 1
134 > if clock <= 0:
134 > if clock <= 0:
135 > raise error.Abort("race scenario timed out")
135 > raise error.Abort("race scenario timed out")
136 > time.sleep(0.1)
136 > time.sleep(0.1)
137 >
137 >
138 > def reposetup(ui, repo):
138 > def reposetup(ui, repo):
139 > class racedrepo(repo.__class__):
139 > class racedrepo(repo.__class__):
140 > @property
140 > @property
141 > def _bookmarks(self):
141 > def _bookmarks(self):
142 > wait(self)
142 > wait(self)
143 > return super(racedrepo, self)._bookmarks
143 > return super(racedrepo, self)._bookmarks
144 > repo.__class__ = racedrepo
144 > repo.__class__ = racedrepo
145 >
145 >
146 > def e():
146 > def e():
147 > with open('push-A-done', 'w'):
147 > with open('push-A-done', 'w'):
148 > pass
148 > pass
149 > atexit.register(e)
149 > atexit.register(e)
150 > EOF
150 > EOF
151
151
152 Actual test
152 Actual test
153 -----------
153 -----------
154
154
155 Start the raced push.
155 Start the raced push.
156
156
157 $ cat >> bookrace-server/.hg/hgrc << EOF
157 $ cat >> bookrace-server/.hg/hgrc << EOF
158 > [extensions]
158 > [extensions]
159 > bookrace=$TESTTMP/bookrace.py
159 > bookrace=$TESTTMP/bookrace.py
160 > EOF
160 > EOF
161 $ hg push -R client-A -r book-A >push-output.txt 2>&1 &
161 $ hg push -R client-A -r book-A >push-output.txt 2>&1 &
162
162
163 Wait up to 30 seconds for that push to start.
163 Wait up to 30 seconds for that push to start.
164
164
165 $ clock=30
165 $ clock=30
166 $ while [ ! -f push-A-started ] && [ $clock -gt 0 ] ; do
166 $ while [ ! -f push-A-started ] && [ $clock -gt 0 ] ; do
167 > clock=`expr $clock - 1`
167 > clock=`expr $clock - 1`
168 > sleep 1
168 > sleep 1
169 > done
169 > done
170
170
171 Do the other push.
171 Do the other push.
172
172
173 $ cat >> bookrace-server/.hg/hgrc << EOF
173 $ cat >> bookrace-server/.hg/hgrc << EOF
174 > [extensions]
174 > [extensions]
175 > bookrace=!
175 > bookrace=!
176 > EOF
176 > EOF
177
177
178 $ hg push -R client-B -r book-B
178 $ hg push -R client-B -r book-B
179 pushing to ssh://user@dummy/bookrace-server
179 pushing to ssh://user@dummy/bookrace-server
180 searching for changes
180 searching for changes
181 remote: adding changesets
181 remote: adding changesets
182 remote: adding manifests
182 remote: adding manifests
183 remote: adding file changes
183 remote: adding file changes
184 remote: added 1 changesets with 1 changes to 1 files
184 remote: added 1 changesets with 1 changes to 1 files
185 updating bookmark book-B
185 updating bookmark book-B
186
186
187 Signal the raced push that we are done (it waits up to 30 seconds).
187 Signal the raced push that we are done (it waits up to 30 seconds).
188
188
189 $ touch push-B-done
189 $ touch push-B-done
190
190
191 Wait for the raced push to finish (within the remainder of the initial 30 seconds).
191 Wait for the raced push to finish (within the remainder of the initial 30 seconds).
192
192
193 $ while [ ! -f push-A-done ] && [ $clock -gt 0 ] ; do
193 $ while [ ! -f push-A-done ] && [ $clock -gt 0 ] ; do
194 > clock=`expr $clock - 1`
194 > clock=`expr $clock - 1`
195 > sleep 1
195 > sleep 1
196 > done
196 > done
197
197
198 Check raced push output.
198 Check raced push output.
199
199
200 $ cat push-output.txt
200 $ cat push-output.txt
201 pushing to ssh://user@dummy/bookrace-server
201 pushing to ssh://user@dummy/bookrace-server
202 searching for changes
202 searching for changes
203 remote has heads on branch 'default' that are not known locally: f26c3b5167d1
203 remote: setting raced push up
204 remote: setting raced push up
204 remote: adding changesets
205 remote: adding changesets
205 remote: adding manifests
206 remote: adding manifests
206 remote: adding file changes
207 remote: adding file changes
207 remote: added 1 changesets with 1 changes to 1 files
208 remote: added 1 changesets with 1 changes to 1 files
208 updating bookmark book-A
209 updating bookmark book-A
209
210
210 Check result of the push.
211 Check result of the push.
211
212
212 $ hg -R bookrace-server log -G
213 $ hg -R bookrace-server log -G
213 o changeset: 4:9ce3b28c16de
214 o changeset: 4:9ce3b28c16de
214 | bookmark: book-A
215 | bookmark: book-A
215 | tag: tip
216 | tag: tip
216 | parent: 1:39c28d785860
217 | parent: 1:39c28d785860
217 | user: test
218 | user: test
218 | date: Thu Jan 01 00:00:00 1970 +0000
219 | date: Thu Jan 01 00:00:00 1970 +0000
219 | summary: A1
220 | summary: A1
220 |
221 |
221 | o changeset: 3:f26c3b5167d1
222 | o changeset: 3:f26c3b5167d1
222 | | bookmark: book-B (false !)
223 | | bookmark: book-B
223 | | user: test
224 | | user: test
224 | | date: Thu Jan 01 00:00:00 1970 +0000
225 | | date: Thu Jan 01 00:00:00 1970 +0000
225 | | summary: B1
226 | | summary: B1
226 | |
227 | |
227 | o changeset: 2:c79985706978
228 | o changeset: 2:c79985706978
228 | | parent: 0:6569b5a81c7e
229 | | parent: 0:6569b5a81c7e
229 | | user: test
230 | | user: test
230 | | date: Thu Jan 01 00:00:00 1970 +0000
231 | | date: Thu Jan 01 00:00:00 1970 +0000
231 | | summary: B0
232 | | summary: B0
232 | |
233 | |
233 o | changeset: 1:39c28d785860
234 o | changeset: 1:39c28d785860
234 |/ user: test
235 |/ user: test
235 | date: Thu Jan 01 00:00:00 1970 +0000
236 | date: Thu Jan 01 00:00:00 1970 +0000
236 | summary: A0
237 | summary: A0
237 |
238 |
238 o changeset: 0:6569b5a81c7e
239 o changeset: 0:6569b5a81c7e
239 user: test
240 user: test
240 date: Thu Jan 01 00:00:00 1970 +0000
241 date: Thu Jan 01 00:00:00 1970 +0000
241 summary: root
242 summary: root
242
243
243 $ hg -R bookrace-server book
244 $ hg -R bookrace-server book
244 book-A 4:9ce3b28c16de
245 book-A 4:9ce3b28c16de
245 book-B 3:f26c3b5167d1 (false !)
246 book-B 3:f26c3b5167d1