mixedrepostorecache: fix a silly redundant updating of set...
Martin von Zweigbergk
r42632:aae93201 default
@@ -1,3193 +1,3192 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""
    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
-        for path, location in pathsandlocations:
-            _cachedfiles.update(pathsandlocations)
+        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == 'plain':
            return obj.vfs.join(fname)
        else:
            if location != '':
                raise error.ProgrammingError('unexpected location: %s' %
                                             location)
            return obj.sjoin(fname)

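# Editor's note on the fix above (illustrative, not part of the source): the
# old loop called set.update() once per (path, location) pair, so for N pairs
# the full iterable was merged into _cachedfiles N times; a single call is
# enough and yields the same final set. A minimal sketch of the equivalence:
#
#     pairs = [('bookmarks', 'plain'), ('00changelog.i', '')]
#     cached = set()
#     cached.update(pairs)          # one call...
#     assert cached == set(pairs)   # ...already contains every pair
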
def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

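# A hypothetical caller's view of isfilecached() (editor's sketch, not from
# this changeset; 'changelog' is one of the filecache-ed repo properties):
#
#     obj, cached = isfilecached(repo, 'changelog')
#     if cached:
#         pass  # reuse obj without triggering the filecache property
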
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

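# Sketch of the decorator in use (editor's illustration; localrepository's
# real destroyed() method is decorated this way later in this file):
#
#     class somerepo(object):
#         @unfilteredmethod
#         def destroyed(self):
#             ...  # always sees the unfiltered repo, even via a repoview
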
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

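# How a caller typically drives the executor (editor's sketch; the
# context-manager pattern mirrors the peer API but is not from this diff):
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand('lookup', {'key': 'tip'})
#         node = f.result()   # futures resolve immediately for local peers
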
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

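# Sketch of how a local peer is usually obtained (editor's illustration;
# hg.peer() lives in mercurial/hg.py, outside this file):
#
#     from mercurial import hg
#     peer = hg.peer(ui, {}, b'/path/to/repo')   # yields a localpeer here
#     caps = peer.capabilities()
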
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
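
# Registration sketch for an extension (editor's illustration; the
# requirement name 'exp-myfeature' is made up):
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myfeature')
#
#     localrepo.featuresetupfuncs.add(featuresetup)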

def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
    extensions.loadall(ui)
    extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')


    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

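# A minimal sketch of driving the factory directly (editor's illustration;
# most code reaches this through mercurial.hg.repository() instead):
#
#     from mercurial import ui as uimod
#     repo = makelocalrepository(uimod.ui.load(), b'/path/to/repo')
#     print(type(repo).__name__)   # e.g. "derivedrepo:/path/to/repo<...>"
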
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

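# Monkeypatch sketch for an extension (editor's illustration; wrapfunction is
# the standard helper from mercurial.extensions, the wrapper name is made up):
#
#     def myloadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         ui.setconfig(b'ui', b'example', b'value', source=b'myext')
#         return loaded
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', myloadhgrc)
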
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == 'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

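# Behavior sketch (editor's illustration; the requirement name is made up):
# a requirement outside the supported set aborts repository opening with a
# pointer to the wiki page referenced above.
#
#     try:
#         ensurerequirementsrecognized({b'fancyfuture'}, {b'revlogv1'})
#     except error.RequirementError:
#         pass  # "repository requires features unknown to this Mercurial: ..."
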
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

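# Decision summary (editor's note, derived from the function above):
#
#     requirements present     -> store class
#     'store' + 'fncache'      -> storemod.fncachestore
#     'store' only             -> storemod.encodedstore
#     neither                  -> storemod.basicstore
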
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

753 def resolverevlogstorevfsoptions(ui, requirements, features):
752 def resolverevlogstorevfsoptions(ui, requirements, features):
754 """Resolve opener options specific to revlogs."""
753 """Resolve opener options specific to revlogs."""
755
754
756 options = {}
755 options = {}
757 options[b'flagprocessors'] = {}
756 options[b'flagprocessors'] = {}
758
757
759 if b'revlogv1' in requirements:
758 if b'revlogv1' in requirements:
760 options[b'revlogv1'] = True
759 options[b'revlogv1'] = True
761 if REVLOGV2_REQUIREMENT in requirements:
760 if REVLOGV2_REQUIREMENT in requirements:
762 options[b'revlogv2'] = True
761 options[b'revlogv2'] = True
763
762
764 if b'generaldelta' in requirements:
763 if b'generaldelta' in requirements:
765 options[b'generaldelta'] = True
764 options[b'generaldelta'] = True
766
765
767 # experimental config: format.chunkcachesize
766 # experimental config: format.chunkcachesize
768 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
767 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
769 if chunkcachesize is not None:
768 if chunkcachesize is not None:
770 options[b'chunkcachesize'] = chunkcachesize
769 options[b'chunkcachesize'] = chunkcachesize
771
770
772 deltabothparents = ui.configbool(b'storage',
771 deltabothparents = ui.configbool(b'storage',
773 b'revlog.optimize-delta-parent-choice')
772 b'revlog.optimize-delta-parent-choice')
774 options[b'deltabothparents'] = deltabothparents
773 options[b'deltabothparents'] = deltabothparents
775
774
776 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
775 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
777 lazydeltabase = False
776 lazydeltabase = False
778 if lazydelta:
777 if lazydelta:
779 lazydeltabase = ui.configbool(b'storage',
778 lazydeltabase = ui.configbool(b'storage',
780 b'revlog.reuse-external-delta-parent')
779 b'revlog.reuse-external-delta-parent')
781 if lazydeltabase is None:
780 if lazydeltabase is None:
782 lazydeltabase = not scmutil.gddeltaconfig(ui)
781 lazydeltabase = not scmutil.gddeltaconfig(ui)
783 options[b'lazydelta'] = lazydelta
782 options[b'lazydelta'] = lazydelta
784 options[b'lazydeltabase'] = lazydeltabase
783 options[b'lazydeltabase'] = lazydeltabase
785
784
786 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
785 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
787 if 0 <= chainspan:
786 if 0 <= chainspan:
788 options[b'maxdeltachainspan'] = chainspan
787 options[b'maxdeltachainspan'] = chainspan
789
788
790 mmapindexthreshold = ui.configbytes(b'experimental',
789 mmapindexthreshold = ui.configbytes(b'experimental',
791 b'mmapindexthreshold')
790 b'mmapindexthreshold')
792 if mmapindexthreshold is not None:
791 if mmapindexthreshold is not None:
793 options[b'mmapindexthreshold'] = mmapindexthreshold
792 options[b'mmapindexthreshold'] = mmapindexthreshold
794
793
795 withsparseread = ui.configbool(b'experimental', b'sparse-read')
    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix('revlog-compression-') or prefix('exp-compression-'):
            options[b'compengine'] = r.split('-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options
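
# A minimal usage sketch. Assumption: the enclosing helper above is the
# store-vfs options resolver taking (ui, requirements, ...); its name and
# signature are not shown in this hunk. The returned dict is consumed by
# the revlog layer:
#
#     opts = resolverevlogstorevfsoptions(ui, repo.requirements, set())
#     if opts[b'sparse-revlog']:
#         assert opts[b'generaldelta']  # implied by sparse-revlog above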

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

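# How the final repository type gets assembled (a simplified sketch of
# the idea, not the exact ``makelocalrepository()`` logic): each factory
# yields a base class, and the bases are combined into a single type.
#
#     bases = []
#     for iface, fn in REPO_INTERFACES:
#         bases.append(fn()(requirements=requirements, features=features))
#     derivedcls = type(r'derivedrepo', tuple(bases), {})
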
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and '%' not in name:
            name = name + '%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
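
    # A minimal usage sketch (assuming an existing `repo` obtained via
    # hg.repository()): chaining views does not stack filters, because
    # each call starts from the unfiltered repository.
    #
    #     served = repo.filtered('served')
    #     visible = served.filtered('visible')  # one filter level, not two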

    @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'),
                         ('bookmarks', ''), ('00changelog.i', ''))
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore,
                                    self._storenarrowmatch)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
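
    # Illustrative sketch (the matcher construction here is an assumption
    # for demonstration, not code from this module):
    #
    #     m = matchmod.match(repo.root, '', ['path:foo'])
    #     nm = repo.narrowmatch(m)  # intersect m with the narrowspec
    #     nm = repo.narrowmatch(m, includeexact=True)  # keep exact files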

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)

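    # Lookup sketch showing the changeid forms handled above (assuming
    # an existing `repo`):
    #
    #     repo[None]       # working directory context
    #     repo['tip']      # symbolic name handled explicitly above
    #     repo[0]          # integer revision
    #     repo[0:3]        # list of changectxs, filtered revs skipped
    #     repo[b'x' * 20]  # 20-byte binary node (raises if unknown)
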
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
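
    # Usage sketch with %-formatting (see revsetlang.formatspec for the
    # full escape list):
    #
    #     for rev in repo.revs('ancestors(%d) and draft()', 42):
    #         ctx = repo[rev]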

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
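
    # Sketch: expand user aliases but override one locally ('mine' is a
    # hypothetical alias name used only for illustration):
    #
    #     revs = repo.anyrevs(['mine() and draft()'], user=True,
    #                         localalias={'mine': 'author(alice)'})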

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
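
    # Sketch (assuming a repo that carries a global tag 'v1.0'):
    #
    #     repo.tags()['tip']       # node of the tip changeset
    #     repo.tagtype('v1.0')     # -> 'global'
    #     repo.tagtype('nosuch')   # -> None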

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_("unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
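
    # Sketch: `known` maps candidate nodes to membership booleans,
    # treating filtered (e.g. hidden) revisions as unknown:
    #
    #     repo.known([repo['tip'].node(), b'\x01' * 20])  # -> [True, False]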

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

1674 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1673 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1675 """changeid must be a changeset revision, if specified.
1674 """changeid must be a changeset revision, if specified.
1676 fileid can be a file revision or node."""
1675 fileid can be a file revision or node."""
1677 return context.filectx(self, path, changeid, fileid,
1676 return context.filectx(self, path, changeid, fileid,
1678 changectx=changectx)
1677 changectx=changectx)
1679
1678
1680 def getcwd(self):
1679 def getcwd(self):
1681 return self.dirstate.getcwd()
1680 return self.dirstate.getcwd()
1682
1681
1683 def pathto(self, f, cwd=None):
1682 def pathto(self, f, cwd=None):
1684 return self.dirstate.pathto(f, cwd)
1683 return self.dirstate.pathto(f, cwd)
1685
1684
1686 def _loadfilter(self, filter):
1685 def _loadfilter(self, filter):
1687 if filter not in self._filterpats:
1686 if filter not in self._filterpats:
1688 l = []
1687 l = []
1689 for pat, cmd in self.ui.configitems(filter):
1688 for pat, cmd in self.ui.configitems(filter):
1690 if cmd == '!':
1689 if cmd == '!':
1691 continue
1690 continue
1692 mf = matchmod.match(self.root, '', [pat])
1691 mf = matchmod.match(self.root, '', [pat])
1693 fn = None
1692 fn = None
1694 params = cmd
1693 params = cmd
1695 for name, filterfn in self._datafilters.iteritems():
1694 for name, filterfn in self._datafilters.iteritems():
1696 if cmd.startswith(name):
1695 if cmd.startswith(name):
1697 fn = filterfn
1696 fn = filterfn
1698 params = cmd[len(name):].lstrip()
1697 params = cmd[len(name):].lstrip()
1699 break
1698 break
1700 if not fn:
1699 if not fn:
1701 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1700 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1702 # Wrap old filters not supporting keyword arguments
1701 # Wrap old filters not supporting keyword arguments
1703 if not pycompat.getargspec(fn)[2]:
1702 if not pycompat.getargspec(fn)[2]:
1704 oldfn = fn
1703 oldfn = fn
1705 fn = lambda s, c, **kwargs: oldfn(s, c)
1704 fn = lambda s, c, **kwargs: oldfn(s, c)
1706 l.append((mf, fn, params))
1705 l.append((mf, fn, params))
1707 self._filterpats[filter] = l
1706 self._filterpats[filter] = l
1708 return self._filterpats[filter]
1707 return self._filterpats[filter]
1709
1708
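    # Illustrative sketch (not part of the module): the [encode] and [decode]
    # hgrc sections feed _loadfilter() above. Each entry maps a file pattern
    # to a filter command, and '!' disables a pattern inherited from another
    # config file. A user configuration might look like:
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   *.gz = pipe: gzip
    #
    # Extensions can also register named filter functions through
    # adddatafilter(); _loadfilter() matches those by command prefix instead
    # of spawning a shell pipeline.
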
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

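    # Illustrative sketch (not part of the module): ``flags`` uses manifest
    # flag letters, so 'l' makes ``data`` the target of a symlink and 'x'
    # marks the written file executable. Hypothetical calls:
    #
    #   repo.wwrite('script.sh', '#!/bin/sh\n', 'x')  # executable file
    #   repo.wwrite('link', 'real/target', 'l')       # symlink to real/target
    #   repo.wwrite('plain.txt', 'text\n', '')        # regular file
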
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

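    # Illustrative sketch (not part of the module): transaction() below
    # insists on the store lock, and the documented ordering is wlock before
    # lock. A typical write path therefore looks like:
    #
    #   with repo.wlock(), repo.lock(), repo.transaction('my-txn') as tr:
    #       ...  # on success the context manager closes 'tr';
    #            # on an exception it releases, undoing the journal
    #
    # A nested call simply returns tr.nest(), so library code can open a
    # "transaction" without caring whether one is already running.
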
    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by default.
        # The flag will be removed when we are happy with the performance
        # impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
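        # Illustrative sketch (not part of the module): an external txnclose
        # hook could consume the file described above roughly like this
        # (variable names are hypothetical):
        #
        #   if os.environ.get('HG_TAG_MOVED'):
        #       with open('.hg/changes/tags.changes') as fp:
        #           for line in fp:
        #               # tag names may contain spaces, hence maxsplit=2
        #               action, node, tag = line.rstrip('\n').split(' ', 2)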
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when closing
                # the transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while the
                # transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        tr.hookargs['txnname'] = desc
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.svfs, 'journal.narrowspec'),
                (self.vfs, 'journal.narrowspec.dirstate'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (bookmarks.bookmarksvfs(self), 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write("journal.bookmarks",
                           bookmarksvfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

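    # Illustrative sketch (not part of the module): recover() above is the
    # code path behind 'hg recover'. On the command line, an interrupted
    # transaction would be cleaned up with:
    #
    #   $ hg recover
    #   rolling back interrupted transaction
    #
    # It returns True when a journal was found and rolled back, and False
    # (with a warning) when there was nothing to recover.
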
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists('undo.bookmarks'):
            bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            self.filtered('served').branchmap()
            self.filtered('served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered('served').tags()

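    # Illustrative sketch (not part of the module): the full variant of
    # updatecaches() above is what 'hg debugupdatecaches' drives, e.g. to
    # prime caches on a freshly cloned repository:
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)
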
    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

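    # Illustrative sketch (not part of the module): _afterlock() is how the
    # txnclose hooks above are deferred until every lock is dropped. A
    # hypothetical in-process caller of this private API:
    #
    #   def notify():
    #       repo.ui.status('repository fully unlocked\n')
    #   repo._afterlock(notify)  # runs immediately if nothing is locked
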
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(vfs=self.svfs,
                       lockname="lock",
                       wait=wait,
                       releasefn=None,
                       acquirefn=self.invalidate,
                       desc=_('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

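    # Illustrative sketch (not part of the module): the ordering contract in
    # the docstrings above means well-behaved callers nest the locks as:
    #
    #   with repo.wlock():      # working copy first...
    #       with repo.lock():   # ...then the store
    #           ...
    #
    # Taking them the other way round trips the '"wlock" acquired after
    # "lock"' devel warning emitted a few lines above.
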
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

2357 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
2356 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
2358 includecopymeta):
2357 includecopymeta):
2359 """
2358 """
2360 commit an individual file as part of a larger transaction
2359 commit an individual file as part of a larger transaction
2361 """
2360 """
2362
2361
2363 fname = fctx.path()
2362 fname = fctx.path()
2364 fparent1 = manifest1.get(fname, nullid)
2363 fparent1 = manifest1.get(fname, nullid)
2365 fparent2 = manifest2.get(fname, nullid)
2364 fparent2 = manifest2.get(fname, nullid)
2366 if isinstance(fctx, context.filectx):
2365 if isinstance(fctx, context.filectx):
2367 node = fctx.filenode()
2366 node = fctx.filenode()
2368 if node in [fparent1, fparent2]:
2367 if node in [fparent1, fparent2]:
2369 self.ui.debug('reusing %s filelog entry\n' % fname)
2368 self.ui.debug('reusing %s filelog entry\n' % fname)
2370 if ((fparent1 != nullid and
2369 if ((fparent1 != nullid and
2371 manifest1.flags(fname) != fctx.flags()) or
2370 manifest1.flags(fname) != fctx.flags()) or
2372 (fparent2 != nullid and
2371 (fparent2 != nullid and
2373 manifest2.flags(fname) != fctx.flags())):
2372 manifest2.flags(fname) != fctx.flags())):
2374 changelist.append(fname)
2373 changelist.append(fname)
2375 return node
2374 return node
2376
2375
2377 flog = self.file(fname)
2376 flog = self.file(fname)
2378 meta = {}
2377 meta = {}
2379 cfname = fctx.copysource()
2378 cfname = fctx.copysource()
2380 if cfname and cfname != fname:
2379 if cfname and cfname != fname:
2381 # Mark the new revision of this file as a copy of another
2380 # Mark the new revision of this file as a copy of another
2382 # file. This copy data will effectively act as a parent
2381 # file. This copy data will effectively act as a parent
2383 # of this new revision. If this is a merge, the first
2382 # of this new revision. If this is a merge, the first
2384 # parent will be the nullid (meaning "look up the copy data")
2383 # parent will be the nullid (meaning "look up the copy data")
2385 # and the second one will be the other parent. For example:
2384 # and the second one will be the other parent. For example:
2386 #
2385 #
2387 # 0 --- 1 --- 3 rev1 changes file foo
2386 # 0 --- 1 --- 3 rev1 changes file foo
2388 # \ / rev2 renames foo to bar and changes it
2387 # \ / rev2 renames foo to bar and changes it
2389 # \- 2 -/ rev3 should have bar with all changes and
2388 # \- 2 -/ rev3 should have bar with all changes and
2390 # should record that bar descends from
2389 # should record that bar descends from
2391 # bar in rev2 and foo in rev1
2390 # bar in rev2 and foo in rev1
2392 #
2391 #
2393 # this allows this merge to succeed:
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4    as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or cnode is None: # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
                if includecopymeta:
                    meta["copy"] = cfname
                    meta["copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

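    # A minimal illustrative sketch of the ancestor-collapsing rule above:
    # when one file parent is an ancestor of the other, the descendant alone
    # carries the history, so the ancestor parent is dropped. The hard-coded
    # ancestor set below is hypothetical and stands in for the result of
    # flog.commonancestorsheads().
    def _example_collapse_fileparents():
        nullid_ = b'\0' * 20
        fparent1, fparent2 = b'rev-a', b'rev-b'
        fparentancestors = {b'rev-a'} # pretend rev-a is rev-b's ancestor
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid_
        elif fparent2 in fparentancestors:
            fparent2 = nullid_
        return fparent1, fparent2 # -> (b'rev-b', nullid_)
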
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

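    # An illustrative sketch of the for/else idiom used above: a directory
    # argument only fails when *no* matched file lives under it. All names
    # and values here are hypothetical.
    def _example_dircheck():
        matched = {'src/a.py', 'docs/readme.txt'}
        d = 'src' + '/'
        for mf in matched:
            if mf.startswith(d):
                break # something under src/ was matched; the check passes
        else:
            raise ValueError('no match under directory!')
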
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   uipathfn(subrepoutil.subrelpath(sub)))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                with self.transaction('commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

        def commithook():
            # hack for commands that use a temporary commit (eg: histedit),
            # where the temporary commit may have been stripped before the
            # hook is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=hex(ret), parent1=hookp1,
                          parent2=hookp2)
        self._afterlock(commithook)
        return ret

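    # An illustrative sketch of driving commit() from an extension: the
    # matcher restricts the commit to explicitly named files. This is a
    # hypothetical usage sketch, assuming the exact(files) signature of
    # matchmod in this series.
    def _example_commit_usage(repo):
        m = matchmod.exact(['src/a.py'])
        node = repo.commit(text="fix a.py", user="alice <a@example.com>",
                           match=m)
        return node # None when nothing changed and empty commits are off
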
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        writecopiesto = self.ui.config('experimental', 'copies.write-to')
        writefilecopymeta = writecopiesto != 'changeset-only'
        writechangesetcopy = (writecopiesto in
                              ('changeset-only', 'compatibility'))
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction("commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed,
                                                    writefilecopymeta)
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(_("trouble committing %s!\n") %
                                     uipathfn(f))
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") %
                                         uipathfn(f))
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())

                    if writechangesetcopy:
                        filesadded = [f for f in changed
                                      if not (f in m1 or f in m2)]
                        filesremoved = removed
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == 'changeset-only':
                # If writing only to changeset extras, use None to indicate that
                # no entry should be written. If writing to both, write an empty
                # entry to prevent the reader from falling back to reading
                # filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy(),
                                   p1copies, p2copies, filesadded,
                                   filesremoved)
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets' phases; if a parent has a higher phase, the
                # result will be compliant anyway.
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

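    # An illustrative restatement of how the experimental copies.write-to
    # values consumed above select the metadata targets (filelog copy
    # metadata vs. changeset extras). This mirrors the two assignments at
    # the top of commitctx(); the helper name is invented.
    def _example_copy_targets(writecopiesto):
        writefilecopymeta = writecopiesto != 'changeset-only'
        writechangesetcopy = writecopiesto in ('changeset-only',
                                               'compatibility')
        # 'filelog-only'   -> (True, False)
        # 'compatibility'  -> (True, True)
        # 'changeset-only' -> (False, True)
        return writefilecopymeta, writechangesetcopy
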
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

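    # An illustrative sketch of a hypothetical extension hooking into the
    # post-dirstate-status mechanism above; per the docstring, the callback
    # must be re-registered before every status run and must consult the
    # live dirstate through wctx.repo().
    def _example_postdsstatus(repo):
        def ondsstatus(wctx, status):
            # use the live dirstate, never a copy captured at registration
            wctx.repo().ui.debug('%d modified files\n'
                                 % len(status.modified))
        repo.addpostdsstatus(ondsstatus)
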
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

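    # An illustrative (hypothetical) use of the API above: print the open
    # heads of the 'default' branch, newest first, using the module-level
    # short() helper.
    def _example_branchheads(repo):
        for h in repo.branchheads('default'):
            repo.ui.write('%s\n' % short(h))
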
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

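    # between() samples first-parent ancestors at exponentially growing
    # distances (1, 2, 4, 8, ...), which lets the legacy wire protocol
    # bisect a chain of ancestors cheaply. An illustrative sketch over toy
    # integer "nodes", where each node's first parent is the next smaller
    # integer:
    def _example_between_sampling():
        top, bottom = 20, 0
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            p = n - 1 # toy first parent
            if i == f:
                l.append(n)
                f = f * 2
            n = p
            i += 1
        return l # -> [19, 18, 16, 12, 4]
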
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of hooks that are called with a
        pushop (carrying repo, remote and outgoing) before changesets are
        pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

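    # An illustrative (hypothetical) pushkey round-trip for the 'bookmarks'
    # namespace: setting a bookmark whose previous value is the empty
    # string, i.e. one that did not exist yet.
    def _example_pushkey(repo, newnode):
        ok = repo.pushkey('bookmarks', 'feature-x', '', hex(newnode))
        if not ok:
            repo.ui.warn('bookmark push was refused by a hook\n')
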
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

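# An illustrative check of the journal -> undo mapping performed by
# undoname(); only the basename changes (paths shown POSIX-style).
def _example_undoname():
    assert undoname('/repo/.hg/journal') == '/repo/.hg/undo'
    assert (undoname('/repo/.hg/journal.bookmarks')
            == '/repo/.hg/undo.bookmarks')
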
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('format', 'revlog-compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'format.revlog-compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    elif compengine == 'zstd':
        requirements.add('revlog-compression-zstd')
    elif compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    if ui.configbool('format', 'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    return requirements

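# An illustrative probe of the function above. With stock defaults (store,
# fncache, dotencode and generaldelta enabled, zlib compression) the
# computed requirements typically come out roughly as in the comment below;
# the exact set depends on the configuration in effect.
def _example_newreporequirements(ui):
    reqs = newreporequirements(ui, defaultcreateopts(ui))
    # roughly: {'revlogv1', 'store', 'fncache', 'dotencode',
    #           'generaldelta', 'sparse-revlog'}
    return reqs
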
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

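# An illustrative sketch of how an extension could wrap the filter above to
# claim a creation option of its own; 'myext-option' is a hypothetical name.
def _example_wrapfilter():
    from mercurial import extensions, localrepo
    def filtered(orig, ui, createopts):
        unknown = orig(ui, createopts)
        unknown.pop('myext-option', None) # we know how to handle this one
        return unknown
    extensions.wrapfunction(localrepo, 'filterknowncreateopts', filtered)
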
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

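# An illustrative direct call, mirroring what `hg init` reaches through
# instance() above; the path and the lfs option are hypothetical.
def _example_createrepository(ui):
    createrepository(ui, b'/tmp/newrepo', createopts={'lfs': True})
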
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one in which
    # every attribute lookup results in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)

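# An illustrative sketch of the poisoning contract above: close() survives
# (as a no-op), while any other attribute access raises.
def _example_poison(repo):
    poisonrepository(repo)
    repo.close() # still allowed, and a no-op
    try:
        repo.root # any other access...
    except error.ProgrammingError:
        pass # ...raises ProgrammingError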