repo: add a `wcachevfs` to access the `.hg/wcache/` directory...
Boris Feld
r40826:e1c3a2e9 default
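
This change introduces a dedicated vfs rooted at `.hg/wcache/` for cache
files tied to the working copy, alongside the existing `cachevfs` for
`.hg/cache/`. Unlike the cache path, which points into the shared repository
when the store is shared, `wcachepath` is always derived from the local
`.hg/` directory, so working-copy caches stay with the working copy. A
minimal sketch of the intended access pattern, assuming the repository
object exposes the new vfs as `repo.wcachevfs` and using `checkisexec`
purely as an illustrative cache file name:

    def readwcache(repo, name=b'checkisexec'):
        # read a working-copy cache file, treating a missing file as absent
        try:
            return repo.wcachevfs.read(name)
        except IOError:
            return None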
@@ -1,3065 +1,3076 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 )
25 )
26 from . import (
26 from . import (
27 bookmarks,
27 bookmarks,
28 branchmap,
28 branchmap,
29 bundle2,
29 bundle2,
30 changegroup,
30 changegroup,
31 changelog,
31 changelog,
32 color,
32 color,
33 context,
33 context,
34 dirstate,
34 dirstate,
35 dirstateguard,
35 dirstateguard,
36 discovery,
36 discovery,
37 encoding,
37 encoding,
38 error,
38 error,
39 exchange,
39 exchange,
40 extensions,
40 extensions,
41 filelog,
41 filelog,
42 hook,
42 hook,
43 lock as lockmod,
43 lock as lockmod,
44 manifest,
44 manifest,
45 match as matchmod,
45 match as matchmod,
46 merge as mergemod,
46 merge as mergemod,
47 mergeutil,
47 mergeutil,
48 namespaces,
48 namespaces,
49 narrowspec,
49 narrowspec,
50 obsolete,
50 obsolete,
51 pathutil,
51 pathutil,
52 phases,
52 phases,
53 pushkey,
53 pushkey,
54 pycompat,
54 pycompat,
55 repository,
55 repository,
56 repoview,
56 repoview,
57 revset,
57 revset,
58 revsetlang,
58 revsetlang,
59 scmutil,
59 scmutil,
60 sparse,
60 sparse,
61 store as storemod,
61 store as storemod,
62 subrepoutil,
62 subrepoutil,
63 tags as tagsmod,
63 tags as tagsmod,
64 transaction,
64 transaction,
65 txnutil,
65 txnutil,
66 util,
66 util,
67 vfs as vfsmod,
67 vfs as vfsmod,
68 )
68 )
69 from .utils import (
69 from .utils import (
70 interfaceutil,
70 interfaceutil,
71 procutil,
71 procutil,
72 stringutil,
72 stringutil,
73 )
73 )
74
74
75 from .revlogutils import (
75 from .revlogutils import (
76 constants as revlogconst,
76 constants as revlogconst,
77 )
77 )
78
78
79 release = lockmod.release
79 release = lockmod.release
80 urlerr = util.urlerr
80 urlerr = util.urlerr
81 urlreq = util.urlreq
81 urlreq = util.urlreq
82
82
83 # set of (path, vfs-location) tuples. vfs-location is:
83 # set of (path, vfs-location) tuples. vfs-location is:
84 # - 'plain' for vfs relative paths
84 # - 'plain' for vfs relative paths
85 # - '' for svfs relative paths
85 # - '' for svfs relative paths
86 _cachedfiles = set()
86 _cachedfiles = set()
87
87
88 class _basefilecache(scmutil.filecache):
88 class _basefilecache(scmutil.filecache):
89 """All filecache usage on repo are done for logic that should be unfiltered
89 """All filecache usage on repo are done for logic that should be unfiltered
90 """
90 """
91 def __get__(self, repo, type=None):
91 def __get__(self, repo, type=None):
92 if repo is None:
92 if repo is None:
93 return self
93 return self
94 # proxy to unfiltered __dict__ since filtered repo has no entry
94 # proxy to unfiltered __dict__ since filtered repo has no entry
95 unfi = repo.unfiltered()
95 unfi = repo.unfiltered()
96 try:
96 try:
97 return unfi.__dict__[self.sname]
97 return unfi.__dict__[self.sname]
98 except KeyError:
98 except KeyError:
99 pass
99 pass
100 return super(_basefilecache, self).__get__(unfi, type)
100 return super(_basefilecache, self).__get__(unfi, type)
101
101
102 def set(self, repo, value):
102 def set(self, repo, value):
103 return super(_basefilecache, self).set(repo.unfiltered(), value)
103 return super(_basefilecache, self).set(repo.unfiltered(), value)
104
104
105 class repofilecache(_basefilecache):
105 class repofilecache(_basefilecache):
106 """filecache for files in .hg but outside of .hg/store"""
106 """filecache for files in .hg but outside of .hg/store"""
107 def __init__(self, *paths):
107 def __init__(self, *paths):
108 super(repofilecache, self).__init__(*paths)
108 super(repofilecache, self).__init__(*paths)
109 for path in paths:
109 for path in paths:
110 _cachedfiles.add((path, 'plain'))
110 _cachedfiles.add((path, 'plain'))
111
111
112 def join(self, obj, fname):
112 def join(self, obj, fname):
113 return obj.vfs.join(fname)
113 return obj.vfs.join(fname)
114
114
115 class storecache(_basefilecache):
115 class storecache(_basefilecache):
116 """filecache for files in the store"""
116 """filecache for files in the store"""
117 def __init__(self, *paths):
117 def __init__(self, *paths):
118 super(storecache, self).__init__(*paths)
118 super(storecache, self).__init__(*paths)
119 for path in paths:
119 for path in paths:
120 _cachedfiles.add((path, ''))
120 _cachedfiles.add((path, ''))
121
121
122 def join(self, obj, fname):
122 def join(self, obj, fname):
123 return obj.sjoin(fname)
123 return obj.sjoin(fname)
124
124
125 def isfilecached(repo, name):
125 def isfilecached(repo, name):
126 """check if a repo has already cached "name" filecache-ed property
126 """check if a repo has already cached "name" filecache-ed property
127
127
128 This returns (cachedobj-or-None, iscached) tuple.
128 This returns (cachedobj-or-None, iscached) tuple.
129 """
129 """
130 cacheentry = repo.unfiltered()._filecache.get(name, None)
130 cacheentry = repo.unfiltered()._filecache.get(name, None)
131 if not cacheentry:
131 if not cacheentry:
132 return None, False
132 return None, False
133 return cacheentry.obj, True
133 return cacheentry.obj, True
134
134
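# Usage sketch (an illustration, not part of this change): peek at a
# filecache-ed property without triggering its computation, e.g.
#     cl, cached = isfilecached(repo, 'changelog')
#     # cl is None and cached is False while 'changelog' is still unloaded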
135 class unfilteredpropertycache(util.propertycache):
135 class unfilteredpropertycache(util.propertycache):
136 """propertycache that apply to unfiltered repo only"""
136 """propertycache that apply to unfiltered repo only"""
137
137
138 def __get__(self, repo, type=None):
138 def __get__(self, repo, type=None):
139 unfi = repo.unfiltered()
139 unfi = repo.unfiltered()
140 if unfi is repo:
140 if unfi is repo:
141 return super(unfilteredpropertycache, self).__get__(unfi)
141 return super(unfilteredpropertycache, self).__get__(unfi)
142 return getattr(unfi, self.name)
142 return getattr(unfi, self.name)
143
143
144 class filteredpropertycache(util.propertycache):
144 class filteredpropertycache(util.propertycache):
145 """propertycache that must take filtering in account"""
145 """propertycache that must take filtering in account"""
146
146
147 def cachevalue(self, obj, value):
147 def cachevalue(self, obj, value):
148 object.__setattr__(obj, self.name, value)
148 object.__setattr__(obj, self.name, value)
149
149
150
150
151 def hasunfilteredcache(repo, name):
151 def hasunfilteredcache(repo, name):
152 """check if a repo has an unfilteredpropertycache value for <name>"""
152 """check if a repo has an unfilteredpropertycache value for <name>"""
153 return name in vars(repo.unfiltered())
153 return name in vars(repo.unfiltered())
154
154
155 def unfilteredmethod(orig):
155 def unfilteredmethod(orig):
156 """decorate method that always need to be run on unfiltered version"""
156 """decorate method that always need to be run on unfiltered version"""
157 def wrapper(repo, *args, **kwargs):
157 def wrapper(repo, *args, **kwargs):
158 return orig(repo.unfiltered(), *args, **kwargs)
158 return orig(repo.unfiltered(), *args, **kwargs)
159 return wrapper
159 return wrapper
160
160
161 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
161 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
162 'unbundle'}
162 'unbundle'}
163 legacycaps = moderncaps.union({'changegroupsubset'})
163 legacycaps = moderncaps.union({'changegroupsubset'})
164
164
165 @interfaceutil.implementer(repository.ipeercommandexecutor)
165 @interfaceutil.implementer(repository.ipeercommandexecutor)
166 class localcommandexecutor(object):
166 class localcommandexecutor(object):
167 def __init__(self, peer):
167 def __init__(self, peer):
168 self._peer = peer
168 self._peer = peer
169 self._sent = False
169 self._sent = False
170 self._closed = False
170 self._closed = False
171
171
172 def __enter__(self):
172 def __enter__(self):
173 return self
173 return self
174
174
175 def __exit__(self, exctype, excvalue, exctb):
175 def __exit__(self, exctype, excvalue, exctb):
176 self.close()
176 self.close()
177
177
178 def callcommand(self, command, args):
178 def callcommand(self, command, args):
179 if self._sent:
179 if self._sent:
180 raise error.ProgrammingError('callcommand() cannot be used after '
180 raise error.ProgrammingError('callcommand() cannot be used after '
181 'sendcommands()')
181 'sendcommands()')
182
182
183 if self._closed:
183 if self._closed:
184 raise error.ProgrammingError('callcommand() cannot be used after '
184 raise error.ProgrammingError('callcommand() cannot be used after '
185 'close()')
185 'close()')
186
186
187 # We don't need to support anything fancy. Just call the named
187 # We don't need to support anything fancy. Just call the named
188 # method on the peer and return a resolved future.
188 # method on the peer and return a resolved future.
189 fn = getattr(self._peer, pycompat.sysstr(command))
189 fn = getattr(self._peer, pycompat.sysstr(command))
190
190
191 f = pycompat.futures.Future()
191 f = pycompat.futures.Future()
192
192
193 try:
193 try:
194 result = fn(**pycompat.strkwargs(args))
194 result = fn(**pycompat.strkwargs(args))
195 except Exception:
195 except Exception:
196 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
196 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
197 else:
197 else:
198 f.set_result(result)
198 f.set_result(result)
199
199
200 return f
200 return f
201
201
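# Usage sketch for the executor above: callers drive it through the peer
# interface, e.g.
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#         node = f.result()
# With this local executor the future is already resolved when
# callcommand() returns, so result() never blocks.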
202 def sendcommands(self):
202 def sendcommands(self):
203 self._sent = True
203 self._sent = True
204
204
205 def close(self):
205 def close(self):
206 self._closed = True
206 self._closed = True
207
207
208 @interfaceutil.implementer(repository.ipeercommands)
208 @interfaceutil.implementer(repository.ipeercommands)
209 class localpeer(repository.peer):
209 class localpeer(repository.peer):
210 '''peer for a local repo; reflects only the most recent API'''
210 '''peer for a local repo; reflects only the most recent API'''
211
211
212 def __init__(self, repo, caps=None):
212 def __init__(self, repo, caps=None):
213 super(localpeer, self).__init__()
213 super(localpeer, self).__init__()
214
214
215 if caps is None:
215 if caps is None:
216 caps = moderncaps.copy()
216 caps = moderncaps.copy()
217 self._repo = repo.filtered('served')
217 self._repo = repo.filtered('served')
218 self.ui = repo.ui
218 self.ui = repo.ui
219 self._caps = repo._restrictcapabilities(caps)
219 self._caps = repo._restrictcapabilities(caps)
220
220
221 # Begin of _basepeer interface.
221 # Begin of _basepeer interface.
222
222
223 def url(self):
223 def url(self):
224 return self._repo.url()
224 return self._repo.url()
225
225
226 def local(self):
226 def local(self):
227 return self._repo
227 return self._repo
228
228
229 def peer(self):
229 def peer(self):
230 return self
230 return self
231
231
232 def canpush(self):
232 def canpush(self):
233 return True
233 return True
234
234
235 def close(self):
235 def close(self):
236 self._repo.close()
236 self._repo.close()
237
237
238 # End of _basepeer interface.
238 # End of _basepeer interface.
239
239
240 # Begin of _basewirecommands interface.
240 # Begin of _basewirecommands interface.
241
241
242 def branchmap(self):
242 def branchmap(self):
243 return self._repo.branchmap()
243 return self._repo.branchmap()
244
244
245 def capabilities(self):
245 def capabilities(self):
246 return self._caps
246 return self._caps
247
247
248 def clonebundles(self):
248 def clonebundles(self):
249 return self._repo.tryread('clonebundles.manifest')
249 return self._repo.tryread('clonebundles.manifest')
250
250
251 def debugwireargs(self, one, two, three=None, four=None, five=None):
251 def debugwireargs(self, one, two, three=None, four=None, five=None):
252 """Used to test argument passing over the wire"""
252 """Used to test argument passing over the wire"""
253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
254 pycompat.bytestr(four),
254 pycompat.bytestr(four),
255 pycompat.bytestr(five))
255 pycompat.bytestr(five))
256
256
257 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
257 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
258 **kwargs):
258 **kwargs):
259 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
259 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
260 common=common, bundlecaps=bundlecaps,
260 common=common, bundlecaps=bundlecaps,
261 **kwargs)[1]
261 **kwargs)[1]
262 cb = util.chunkbuffer(chunks)
262 cb = util.chunkbuffer(chunks)
263
263
264 if exchange.bundle2requested(bundlecaps):
264 if exchange.bundle2requested(bundlecaps):
265 # When requesting a bundle2, getbundle returns a stream to make the
265 # When requesting a bundle2, getbundle returns a stream to make the
266 # wire level function happier. We need to build a proper object
266 # wire level function happier. We need to build a proper object
267 # from it in local peer.
267 # from it in local peer.
268 return bundle2.getunbundler(self.ui, cb)
268 return bundle2.getunbundler(self.ui, cb)
269 else:
269 else:
270 return changegroup.getunbundler('01', cb, None)
270 return changegroup.getunbundler('01', cb, None)
271
271
272 def heads(self):
272 def heads(self):
273 return self._repo.heads()
273 return self._repo.heads()
274
274
275 def known(self, nodes):
275 def known(self, nodes):
276 return self._repo.known(nodes)
276 return self._repo.known(nodes)
277
277
278 def listkeys(self, namespace):
278 def listkeys(self, namespace):
279 return self._repo.listkeys(namespace)
279 return self._repo.listkeys(namespace)
280
280
281 def lookup(self, key):
281 def lookup(self, key):
282 return self._repo.lookup(key)
282 return self._repo.lookup(key)
283
283
284 def pushkey(self, namespace, key, old, new):
284 def pushkey(self, namespace, key, old, new):
285 return self._repo.pushkey(namespace, key, old, new)
285 return self._repo.pushkey(namespace, key, old, new)
286
286
287 def stream_out(self):
287 def stream_out(self):
288 raise error.Abort(_('cannot perform stream clone against local '
288 raise error.Abort(_('cannot perform stream clone against local '
289 'peer'))
289 'peer'))
290
290
291 def unbundle(self, bundle, heads, url):
291 def unbundle(self, bundle, heads, url):
292 """apply a bundle on a repo
292 """apply a bundle on a repo
293
293
294 This function handles the repo locking itself."""
294 This function handles the repo locking itself."""
295 try:
295 try:
296 try:
296 try:
297 bundle = exchange.readbundle(self.ui, bundle, None)
297 bundle = exchange.readbundle(self.ui, bundle, None)
298 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
298 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
299 if util.safehasattr(ret, 'getchunks'):
299 if util.safehasattr(ret, 'getchunks'):
300 # This is a bundle20 object, turn it into an unbundler.
300 # This is a bundle20 object, turn it into an unbundler.
301 # This little dance should be dropped eventually when the
301 # This little dance should be dropped eventually when the
302 # API is finally improved.
302 # API is finally improved.
303 stream = util.chunkbuffer(ret.getchunks())
303 stream = util.chunkbuffer(ret.getchunks())
304 ret = bundle2.getunbundler(self.ui, stream)
304 ret = bundle2.getunbundler(self.ui, stream)
305 return ret
305 return ret
306 except Exception as exc:
306 except Exception as exc:
307 # If the exception contains output salvaged from a bundle2
307 # If the exception contains output salvaged from a bundle2
308 # reply, we need to make sure it is printed before continuing
308 # reply, we need to make sure it is printed before continuing
309 # to fail. So we build a bundle2 with such output and consume
309 # to fail. So we build a bundle2 with such output and consume
310 # it directly.
310 # it directly.
311 #
311 #
312 # This is not very elegant but allows a "simple" solution for
312 # This is not very elegant but allows a "simple" solution for
313 # issue4594
313 # issue4594
314 output = getattr(exc, '_bundle2salvagedoutput', ())
314 output = getattr(exc, '_bundle2salvagedoutput', ())
315 if output:
315 if output:
316 bundler = bundle2.bundle20(self._repo.ui)
316 bundler = bundle2.bundle20(self._repo.ui)
317 for out in output:
317 for out in output:
318 bundler.addpart(out)
318 bundler.addpart(out)
319 stream = util.chunkbuffer(bundler.getchunks())
319 stream = util.chunkbuffer(bundler.getchunks())
320 b = bundle2.getunbundler(self.ui, stream)
320 b = bundle2.getunbundler(self.ui, stream)
321 bundle2.processbundle(self._repo, b)
321 bundle2.processbundle(self._repo, b)
322 raise
322 raise
323 except error.PushRaced as exc:
323 except error.PushRaced as exc:
324 raise error.ResponseError(_('push failed:'),
324 raise error.ResponseError(_('push failed:'),
325 stringutil.forcebytestr(exc))
325 stringutil.forcebytestr(exc))
326
326
327 # End of _basewirecommands interface.
327 # End of _basewirecommands interface.
328
328
329 # Begin of peer interface.
329 # Begin of peer interface.
330
330
331 def commandexecutor(self):
331 def commandexecutor(self):
332 return localcommandexecutor(self)
332 return localcommandexecutor(self)
333
333
334 # End of peer interface.
334 # End of peer interface.
335
335
336 @interfaceutil.implementer(repository.ipeerlegacycommands)
336 @interfaceutil.implementer(repository.ipeerlegacycommands)
337 class locallegacypeer(localpeer):
337 class locallegacypeer(localpeer):
338 '''peer extension which implements legacy methods too; used for tests with
338 '''peer extension which implements legacy methods too; used for tests with
339 restricted capabilities'''
339 restricted capabilities'''
340
340
341 def __init__(self, repo):
341 def __init__(self, repo):
342 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
342 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
343
343
344 # Begin of baselegacywirecommands interface.
344 # Begin of baselegacywirecommands interface.
345
345
346 def between(self, pairs):
346 def between(self, pairs):
347 return self._repo.between(pairs)
347 return self._repo.between(pairs)
348
348
349 def branches(self, nodes):
349 def branches(self, nodes):
350 return self._repo.branches(nodes)
350 return self._repo.branches(nodes)
351
351
352 def changegroup(self, nodes, source):
352 def changegroup(self, nodes, source):
353 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
353 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
354 missingheads=self._repo.heads())
354 missingheads=self._repo.heads())
355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
356
356
357 def changegroupsubset(self, bases, heads, source):
357 def changegroupsubset(self, bases, heads, source):
358 outgoing = discovery.outgoing(self._repo, missingroots=bases,
358 outgoing = discovery.outgoing(self._repo, missingroots=bases,
359 missingheads=heads)
359 missingheads=heads)
360 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
360 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
361
361
362 # End of baselegacywirecommands interface.
362 # End of baselegacywirecommands interface.
363
363
364 # Increment the sub-version when the revlog v2 format changes to lock out old
364 # Increment the sub-version when the revlog v2 format changes to lock out old
365 # clients.
365 # clients.
366 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
366 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
367
367
368 # A repository with the sparserevlog feature will have delta chains that
368 # A repository with the sparserevlog feature will have delta chains that
369 # can spread over a larger span. Sparse reading cuts these large spans into
369 # can spread over a larger span. Sparse reading cuts these large spans into
370 # pieces, so that each piece isn't too big.
370 # pieces, so that each piece isn't too big.
371 # Without the sparserevlog capability, reading from the repository could use
371 # Without the sparserevlog capability, reading from the repository could use
372 # huge amounts of memory, because the whole span would be read at once,
372 # huge amounts of memory, because the whole span would be read at once,
373 # including all the intermediate revisions that aren't pertinent for the chain.
373 # including all the intermediate revisions that aren't pertinent for the chain.
374 # This is why once a repository has enabled sparse-read, it becomes required.
374 # This is why once a repository has enabled sparse-read, it becomes required.
375 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
375 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
376
376
377 # Functions receiving (ui, features) that extensions can register to impact
377 # Functions receiving (ui, features) that extensions can register to impact
378 # the ability to load repositories with custom requirements. Only
378 # the ability to load repositories with custom requirements. Only
379 # functions defined in loaded extensions are called.
379 # functions defined in loaded extensions are called.
380 #
380 #
381 # The function receives a set of requirement strings that the repository
381 # The function receives a set of requirement strings that the repository
382 # is capable of opening. Functions will typically add elements to the
382 # is capable of opening. Functions will typically add elements to the
383 # set to reflect that the extension knows how to handle those requirements.
383 # set to reflect that the extension knows how to handle those requirements.
384 featuresetupfuncs = set()
384 featuresetupfuncs = set()
385
385
386 def makelocalrepository(baseui, path, intents=None):
386 def makelocalrepository(baseui, path, intents=None):
387 """Create a local repository object.
387 """Create a local repository object.
388
388
389 Given arguments needed to construct a local repository, this function
389 Given arguments needed to construct a local repository, this function
390 performs various early repository loading steps (such as
390 performs various early repository loading steps (such as
391 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
391 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
392 the repository can be opened, derives a type suitable for representing
392 the repository can be opened, derives a type suitable for representing
393 that repository, and returns an instance of it.
393 that repository, and returns an instance of it.
394
394
395 The returned object conforms to the ``repository.completelocalrepository``
395 The returned object conforms to the ``repository.completelocalrepository``
396 interface.
396 interface.
397
397
398 The repository type is derived by calling a series of factory functions
398 The repository type is derived by calling a series of factory functions
399 for each aspect/interface of the final repository. These are defined by
399 for each aspect/interface of the final repository. These are defined by
400 ``REPO_INTERFACES``.
400 ``REPO_INTERFACES``.
401
401
402 Each factory function is called to produce a type implementing a specific
402 Each factory function is called to produce a type implementing a specific
403 interface. The cumulative list of returned types will be combined into a
403 interface. The cumulative list of returned types will be combined into a
404 new type and that type will be instantiated to represent the local
404 new type and that type will be instantiated to represent the local
405 repository.
405 repository.
406
406
407 The factory functions each receive various state that may be consulted
407 The factory functions each receive various state that may be consulted
408 as part of deriving a type.
408 as part of deriving a type.
409
409
410 Extensions should wrap these factory functions to customize repository type
410 Extensions should wrap these factory functions to customize repository type
411 creation. Note that an extension's wrapped function may be called even if
411 creation. Note that an extension's wrapped function may be called even if
412 that extension is not loaded for the repo being constructed. Extensions
412 that extension is not loaded for the repo being constructed. Extensions
413 should check if their ``__name__`` appears in the
413 should check if their ``__name__`` appears in the
414 ``extensionmodulenames`` set passed to the factory function and no-op if
414 ``extensionmodulenames`` set passed to the factory function and no-op if
415 not.
415 not.
416 """
416 """
417 ui = baseui.copy()
417 ui = baseui.copy()
418 # Prevent copying repo configuration.
418 # Prevent copying repo configuration.
419 ui.copy = baseui.copy
419 ui.copy = baseui.copy
420
420
421 # Working directory VFS rooted at repository root.
421 # Working directory VFS rooted at repository root.
422 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
422 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
423
423
424 # Main VFS for .hg/ directory.
424 # Main VFS for .hg/ directory.
425 hgpath = wdirvfs.join(b'.hg')
425 hgpath = wdirvfs.join(b'.hg')
426 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
426 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
427
427
428 # The .hg/ path should exist and should be a directory. All other
428 # The .hg/ path should exist and should be a directory. All other
429 # cases are errors.
429 # cases are errors.
430 if not hgvfs.isdir():
430 if not hgvfs.isdir():
431 try:
431 try:
432 hgvfs.stat()
432 hgvfs.stat()
433 except OSError as e:
433 except OSError as e:
434 if e.errno != errno.ENOENT:
434 if e.errno != errno.ENOENT:
435 raise
435 raise
436
436
437 raise error.RepoError(_(b'repository %s not found') % path)
437 raise error.RepoError(_(b'repository %s not found') % path)
438
438
439 # .hg/requires file contains a newline-delimited list of
439 # .hg/requires file contains a newline-delimited list of
440 # features/capabilities the opener (us) must have in order to use
440 # features/capabilities the opener (us) must have in order to use
441 # the repository. This file was introduced in Mercurial 0.9.2,
441 # the repository. This file was introduced in Mercurial 0.9.2,
442 # which means very old repositories may not have one. We assume
442 # which means very old repositories may not have one. We assume
443 # a missing file translates to no requirements.
443 # a missing file translates to no requirements.
444 try:
444 try:
445 requirements = set(hgvfs.read(b'requires').splitlines())
445 requirements = set(hgvfs.read(b'requires').splitlines())
446 except IOError as e:
446 except IOError as e:
447 if e.errno != errno.ENOENT:
447 if e.errno != errno.ENOENT:
448 raise
448 raise
449 requirements = set()
449 requirements = set()
450
450
451 # The .hg/hgrc file may load extensions or contain config options
451 # The .hg/hgrc file may load extensions or contain config options
452 # that influence repository construction. Attempt to load it and
452 # that influence repository construction. Attempt to load it and
453 # process any new extensions that it may have pulled in.
453 # process any new extensions that it may have pulled in.
454 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
454 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
455 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
455 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
456 extensions.loadall(ui)
456 extensions.loadall(ui)
457 extensions.populateui(ui)
457 extensions.populateui(ui)
458
458
459 # Set of module names of extensions loaded for this repository.
459 # Set of module names of extensions loaded for this repository.
460 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
460 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
461
461
462 supportedrequirements = gathersupportedrequirements(ui)
462 supportedrequirements = gathersupportedrequirements(ui)
463
463
464 # We first validate the requirements are known.
464 # We first validate the requirements are known.
465 ensurerequirementsrecognized(requirements, supportedrequirements)
465 ensurerequirementsrecognized(requirements, supportedrequirements)
466
466
467 # Then we validate that the known set is reasonable to use together.
467 # Then we validate that the known set is reasonable to use together.
468 ensurerequirementscompatible(ui, requirements)
468 ensurerequirementscompatible(ui, requirements)
469
469
470 # TODO there are unhandled edge cases related to opening repositories with
470 # TODO there are unhandled edge cases related to opening repositories with
471 # shared storage. If storage is shared, we should also test for requirements
471 # shared storage. If storage is shared, we should also test for requirements
472 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
472 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
473 # that repo, as that repo may load extensions needed to open it. This is a
473 # that repo, as that repo may load extensions needed to open it. This is a
474 # bit complicated because we don't want the other hgrc to overwrite settings
474 # bit complicated because we don't want the other hgrc to overwrite settings
475 # in this hgrc.
475 # in this hgrc.
476 #
476 #
477 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
477 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
478 # file when sharing repos. But if a requirement is added after the share is
478 # file when sharing repos. But if a requirement is added after the share is
479 # performed, thereby introducing a new requirement for the opener, we may
479 # performed, thereby introducing a new requirement for the opener, we may
480 # not see that and could encounter a run-time error interacting with
480 # not see that and could encounter a run-time error interacting with
481 # that shared store since it has an unknown-to-us requirement.
481 # that shared store since it has an unknown-to-us requirement.
482
482
483 # At this point, we know we should be capable of opening the repository.
483 # At this point, we know we should be capable of opening the repository.
484 # Now get on with doing that.
484 # Now get on with doing that.
485
485
486 features = set()
486 features = set()
487
487
488 # The "store" part of the repository holds versioned data. How it is
488 # The "store" part of the repository holds versioned data. How it is
489 # accessed is determined by various requirements. The ``shared`` or
489 # accessed is determined by various requirements. The ``shared`` or
490 # ``relshared`` requirements indicate the store lives in the path contained
490 # ``relshared`` requirements indicate the store lives in the path contained
491 # in the ``.hg/sharedpath`` file. This is an absolute path for
491 # in the ``.hg/sharedpath`` file. This is an absolute path for
492 # ``shared`` and relative to ``.hg/`` for ``relshared``.
492 # ``shared`` and relative to ``.hg/`` for ``relshared``.
493 if b'shared' in requirements or b'relshared' in requirements:
493 if b'shared' in requirements or b'relshared' in requirements:
494 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
494 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
495 if b'relshared' in requirements:
495 if b'relshared' in requirements:
496 sharedpath = hgvfs.join(sharedpath)
496 sharedpath = hgvfs.join(sharedpath)
497
497
498 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
498 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
499
499
500 if not sharedvfs.exists():
500 if not sharedvfs.exists():
501 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
501 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
502 b'directory %s') % sharedvfs.base)
502 b'directory %s') % sharedvfs.base)
503
503
504 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
504 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
505
505
506 storebasepath = sharedvfs.base
506 storebasepath = sharedvfs.base
507 cachepath = sharedvfs.join(b'cache')
507 cachepath = sharedvfs.join(b'cache')
508 else:
508 else:
509 storebasepath = hgvfs.base
509 storebasepath = hgvfs.base
510 cachepath = hgvfs.join(b'cache')
510 cachepath = hgvfs.join(b'cache')
511 wcachepath = hgvfs.join(b'wcache')
512
511
513
512 # The store has changed over time and the exact layout is dictated by
514 # The store has changed over time and the exact layout is dictated by
513 # requirements. The store interface abstracts differences across all
515 # requirements. The store interface abstracts differences across all
514 # of them.
516 # of them.
515 store = makestore(requirements, storebasepath,
517 store = makestore(requirements, storebasepath,
516 lambda base: vfsmod.vfs(base, cacheaudited=True))
518 lambda base: vfsmod.vfs(base, cacheaudited=True))
517 hgvfs.createmode = store.createmode
519 hgvfs.createmode = store.createmode
518
520
519 storevfs = store.vfs
521 storevfs = store.vfs
520 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
522 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
521
523
522 # The cache vfs is used to manage cache files.
524 # The cache vfs is used to manage cache files.
523 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
525 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
524 cachevfs.createmode = store.createmode
526 cachevfs.createmode = store.createmode
527 # The wcache vfs is used to manage cache files related to the working copy
528 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
529 wcachevfs.createmode = store.createmode
525
530
526 # Now resolve the type for the repository object. We do this by repeatedly
531 # Now resolve the type for the repository object. We do this by repeatedly
527 # calling a factory function to produces types for specific aspects of the
527 # calling a factory function to produce types for specific aspects of the
532 # calling a factory function to produce types for specific aspects of the
533 # repo's operation. The aggregate returned types are used as base classes
529 # for a dynamically-derived type, which will represent our new repository.
534 # for a dynamically-derived type, which will represent our new repository.
530
535
531 bases = []
536 bases = []
532 extrastate = {}
537 extrastate = {}
533
538
534 for iface, fn in REPO_INTERFACES:
539 for iface, fn in REPO_INTERFACES:
535 # We pass all potentially useful state to give extensions tons of
540 # We pass all potentially useful state to give extensions tons of
536 # flexibility.
541 # flexibility.
537 typ = fn()(ui=ui,
542 typ = fn()(ui=ui,
538 intents=intents,
543 intents=intents,
539 requirements=requirements,
544 requirements=requirements,
540 features=features,
545 features=features,
541 wdirvfs=wdirvfs,
546 wdirvfs=wdirvfs,
542 hgvfs=hgvfs,
547 hgvfs=hgvfs,
543 store=store,
548 store=store,
544 storevfs=storevfs,
549 storevfs=storevfs,
545 storeoptions=storevfs.options,
550 storeoptions=storevfs.options,
546 cachevfs=cachevfs,
551 cachevfs=cachevfs,
552 wcachevfs=wcachevfs,
547 extensionmodulenames=extensionmodulenames,
553 extensionmodulenames=extensionmodulenames,
548 extrastate=extrastate,
554 extrastate=extrastate,
549 baseclasses=bases)
555 baseclasses=bases)
550
556
551 if not isinstance(typ, type):
557 if not isinstance(typ, type):
552 raise error.ProgrammingError('unable to construct type for %s' %
558 raise error.ProgrammingError('unable to construct type for %s' %
553 iface)
559 iface)
554
560
555 bases.append(typ)
561 bases.append(typ)
556
562
557 # type() allows you to use characters in type names that wouldn't be
563 # type() allows you to use characters in type names that wouldn't be
558 # recognized as Python symbols in source code. We abuse that to add
564 # recognized as Python symbols in source code. We abuse that to add
559 # rich information about our constructed repo.
565 # rich information about our constructed repo.
560 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
566 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
561 wdirvfs.base,
567 wdirvfs.base,
562 b','.join(sorted(requirements))))
568 b','.join(sorted(requirements))))
563
569
564 cls = type(name, tuple(bases), {})
570 cls = type(name, tuple(bases), {})
565
571
566 return cls(
572 return cls(
567 baseui=baseui,
573 baseui=baseui,
568 ui=ui,
574 ui=ui,
569 origroot=path,
575 origroot=path,
570 wdirvfs=wdirvfs,
576 wdirvfs=wdirvfs,
571 hgvfs=hgvfs,
577 hgvfs=hgvfs,
572 requirements=requirements,
578 requirements=requirements,
573 supportedrequirements=supportedrequirements,
579 supportedrequirements=supportedrequirements,
574 sharedpath=storebasepath,
580 sharedpath=storebasepath,
575 store=store,
581 store=store,
576 cachevfs=cachevfs,
582 cachevfs=cachevfs,
583 wcachevfs=wcachevfs,
577 features=features,
584 features=features,
578 intents=intents)
585 intents=intents)
579
586
580 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
587 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
581 """Load hgrc files/content into a ui instance.
588 """Load hgrc files/content into a ui instance.
582
589
583 This is called during repository opening to load any additional
590 This is called during repository opening to load any additional
584 config files or settings relevant to the current repository.
591 config files or settings relevant to the current repository.
585
592
586 Returns a bool indicating whether any additional configs were loaded.
593 Returns a bool indicating whether any additional configs were loaded.
587
594
588 Extensions should monkeypatch this function to modify how per-repo
595 Extensions should monkeypatch this function to modify how per-repo
589 configs are loaded. For example, an extension may wish to pull in
596 configs are loaded. For example, an extension may wish to pull in
590 configs from alternate files or sources.
597 configs from alternate files or sources.
591 """
598 """
592 try:
599 try:
593 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
600 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
594 return True
601 return True
595 except IOError:
602 except IOError:
596 return False
603 return False
597
604
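# A hedged sketch of the monkeypatching the docstring above invites;
# b'hgrc-extra' is a hypothetical per-repo config file name.
def _loadhgrcextra(orig, ui, wdirvfs, hgvfs, requirements):
    loaded = orig(ui, wdirvfs, hgvfs, requirements)
    try:
        # pull settings from an additional, extension-defined file
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        loaded = True
    except IOError:
        pass
    return loaded
# an extension would install it with:
#   extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrcextra)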
598 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
605 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
599 """Perform additional actions after .hg/hgrc is loaded.
606 """Perform additional actions after .hg/hgrc is loaded.
600
607
601 This function is called during repository loading immediately after
608 This function is called during repository loading immediately after
602 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
609 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
603
610
604 The function can be used to validate configs, automatically add
611 The function can be used to validate configs, automatically add
605 options (including extensions) based on requirements, etc.
612 options (including extensions) based on requirements, etc.
606 """
613 """
607
614
608 # Map of requirements to list of extensions to load automatically when
615 # Map of requirements to list of extensions to load automatically when
609 # requirement is present.
616 # requirement is present.
610 autoextensions = {
617 autoextensions = {
611 b'largefiles': [b'largefiles'],
618 b'largefiles': [b'largefiles'],
612 b'lfs': [b'lfs'],
619 b'lfs': [b'lfs'],
613 }
620 }
614
621
615 for requirement, names in sorted(autoextensions.items()):
622 for requirement, names in sorted(autoextensions.items()):
616 if requirement not in requirements:
623 if requirement not in requirements:
617 continue
624 continue
618
625
619 for name in names:
626 for name in names:
620 if not ui.hasconfig(b'extensions', name):
627 if not ui.hasconfig(b'extensions', name):
621 ui.setconfig(b'extensions', name, b'', source='autoload')
628 ui.setconfig(b'extensions', name, b'', source='autoload')
622
629
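# Net effect, for illustration: a repository whose .hg/requires lists
# "lfs" is opened as if its hgrc contained
#
#     [extensions]
#     lfs =
#
# unless the user already configured the extension explicitly.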
623 def gathersupportedrequirements(ui):
630 def gathersupportedrequirements(ui):
624 """Determine the complete set of recognized requirements."""
631 """Determine the complete set of recognized requirements."""
625 # Start with all requirements supported by this file.
632 # Start with all requirements supported by this file.
626 supported = set(localrepository._basesupported)
633 supported = set(localrepository._basesupported)
627
634
628 # Execute ``featuresetupfuncs`` entries if they belong to an extension
635 # Execute ``featuresetupfuncs`` entries if they belong to an extension
629 # relevant to this ui instance.
636 # relevant to this ui instance.
630 modules = {m.__name__ for n, m in extensions.extensions(ui)}
637 modules = {m.__name__ for n, m in extensions.extensions(ui)}
631
638
632 for fn in featuresetupfuncs:
639 for fn in featuresetupfuncs:
633 if fn.__module__ in modules:
640 if fn.__module__ in modules:
634 fn(ui, supported)
641 fn(ui, supported)
635
642
636 # Add derived requirements from registered compression engines.
643 # Add derived requirements from registered compression engines.
637 for name in util.compengines:
644 for name in util.compengines:
638 engine = util.compengines[name]
645 engine = util.compengines[name]
639 if engine.revlogheader():
646 if engine.revlogheader():
640 supported.add(b'exp-compression-%s' % name)
647 supported.add(b'exp-compression-%s' % name)
641
648
642 return supported
649 return supported
643
650
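# Sketch of the ``featuresetupfuncs`` protocol consulted above; the
# b'exp-myfeature' requirement name is hypothetical.
def featuresetup(ui, supported):
    # advertise that the registering extension can open repositories
    # carrying this requirement
    supported.add(b'exp-myfeature')
# an extension registers it at load time:
#   localrepo.featuresetupfuncs.add(featuresetup)
# and, per the module check above, it only runs for ui instances that
# actually loaded that extension.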
644 def ensurerequirementsrecognized(requirements, supported):
651 def ensurerequirementsrecognized(requirements, supported):
645 """Validate that a set of local requirements is recognized.
652 """Validate that a set of local requirements is recognized.
646
653
647 Receives a set of requirements. Raises an ``error.RepoError`` if there
654 Receives a set of requirements. Raises an ``error.RepoError`` if there
648 exists any requirement in that set that currently loaded code doesn't
655 exists any requirement in that set that currently loaded code doesn't
649 recognize.
656 recognize.
650
657
651 Returns None when all requirements are recognized.
658 Returns None when all requirements are recognized.
652 """
659 """
653 missing = set()
660 missing = set()
654
661
655 for requirement in requirements:
662 for requirement in requirements:
656 if requirement in supported:
663 if requirement in supported:
657 continue
664 continue
658
665
659 if not requirement or not requirement[0:1].isalnum():
666 if not requirement or not requirement[0:1].isalnum():
660 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
667 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
661
668
662 missing.add(requirement)
669 missing.add(requirement)
663
670
664 if missing:
671 if missing:
665 raise error.RequirementError(
672 raise error.RequirementError(
666 _(b'repository requires features unknown to this Mercurial: %s') %
673 _(b'repository requires features unknown to this Mercurial: %s') %
667 b' '.join(sorted(missing)),
674 b' '.join(sorted(missing)),
668 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
675 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
669 b'for more information'))
676 b'for more information'))
670
677
671 def ensurerequirementscompatible(ui, requirements):
678 def ensurerequirementscompatible(ui, requirements):
672 """Validates that a set of recognized requirements is mutually compatible.
679 """Validates that a set of recognized requirements is mutually compatible.
673
680
674 Some requirements may not be compatible with others or require
681 Some requirements may not be compatible with others or require
675 config options that aren't enabled. This function is called during
682 config options that aren't enabled. This function is called during
676 repository opening to ensure that the set of requirements needed
683 repository opening to ensure that the set of requirements needed
677 to open a repository is sane and compatible with config options.
684 to open a repository is sane and compatible with config options.
678
685
679 Extensions can monkeypatch this function to perform additional
686 Extensions can monkeypatch this function to perform additional
680 checking.
687 checking.
681
688
682 ``error.RepoError`` should be raised on failure.
689 ``error.RepoError`` should be raised on failure.
683 """
690 """
684 if b'exp-sparse' in requirements and not sparse.enabled:
691 if b'exp-sparse' in requirements and not sparse.enabled:
685 raise error.RepoError(_(b'repository is using sparse feature but '
692 raise error.RepoError(_(b'repository is using sparse feature but '
686 b'sparse is not enabled; enable the '
693 b'sparse is not enabled; enable the '
687 b'"sparse" extensions to access'))
694 b'"sparse" extensions to access'))
688
695
689 def makestore(requirements, path, vfstype):
696 def makestore(requirements, path, vfstype):
690 """Construct a storage object for a repository."""
697 """Construct a storage object for a repository."""
691 if b'store' in requirements:
698 if b'store' in requirements:
692 if b'fncache' in requirements:
699 if b'fncache' in requirements:
693 return storemod.fncachestore(path, vfstype,
700 return storemod.fncachestore(path, vfstype,
694 b'dotencode' in requirements)
701 b'dotencode' in requirements)
695
702
696 return storemod.encodedstore(path, vfstype)
703 return storemod.encodedstore(path, vfstype)
697
704
698 return storemod.basicstore(path, vfstype)
705 return storemod.basicstore(path, vfstype)
699
706
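# The requirement combinations thus map to store implementations:
#   'store' + 'fncache' -> fncachestore ('dotencode' additionally selects
#                          the dot-encoded filename scheme)
#   'store' only        -> encodedstore
#   neither             -> basicstore (very old repos without .hg/store)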
700 def resolvestorevfsoptions(ui, requirements, features):
707 def resolvestorevfsoptions(ui, requirements, features):
701 """Resolve the options to pass to the store vfs opener.
708 """Resolve the options to pass to the store vfs opener.
702
709
703 The returned dict is used to influence behavior of the storage layer.
710 The returned dict is used to influence behavior of the storage layer.
704 """
711 """
705 options = {}
712 options = {}
706
713
707 if b'treemanifest' in requirements:
714 if b'treemanifest' in requirements:
708 options[b'treemanifest'] = True
715 options[b'treemanifest'] = True
709
716
710 # experimental config: format.manifestcachesize
717 # experimental config: format.manifestcachesize
711 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
718 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
712 if manifestcachesize is not None:
719 if manifestcachesize is not None:
713 options[b'manifestcachesize'] = manifestcachesize
720 options[b'manifestcachesize'] = manifestcachesize
714
721
715 # In the absence of another requirement superseding a revlog-related
722 # In the absence of another requirement superseding a revlog-related
716 # requirement, we have to assume the repo is using revlog version 0.
723 # requirement, we have to assume the repo is using revlog version 0.
717 # This revlog format is super old and we don't bother trying to parse
724 # This revlog format is super old and we don't bother trying to parse
718 # opener options for it because those options wouldn't do anything
725 # opener options for it because those options wouldn't do anything
719 # meaningful on such old repos.
726 # meaningful on such old repos.
720 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
727 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
721 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
728 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
722
729
723 return options
730 return options
724
731
725 def resolverevlogstorevfsoptions(ui, requirements, features):
732 def resolverevlogstorevfsoptions(ui, requirements, features):
726 """Resolve opener options specific to revlogs."""
733 """Resolve opener options specific to revlogs."""
727
734
728 options = {}
735 options = {}
729 options[b'flagprocessors'] = {}
736 options[b'flagprocessors'] = {}
730
737
731 if b'revlogv1' in requirements:
738 if b'revlogv1' in requirements:
732 options[b'revlogv1'] = True
739 options[b'revlogv1'] = True
733 if REVLOGV2_REQUIREMENT in requirements:
740 if REVLOGV2_REQUIREMENT in requirements:
734 options[b'revlogv2'] = True
741 options[b'revlogv2'] = True
735
742
736 if b'generaldelta' in requirements:
743 if b'generaldelta' in requirements:
737 options[b'generaldelta'] = True
744 options[b'generaldelta'] = True
738
745
739 # experimental config: format.chunkcachesize
746 # experimental config: format.chunkcachesize
740 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
747 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
741 if chunkcachesize is not None:
748 if chunkcachesize is not None:
742 options[b'chunkcachesize'] = chunkcachesize
749 options[b'chunkcachesize'] = chunkcachesize
743
750
744 deltabothparents = ui.configbool(b'storage',
751 deltabothparents = ui.configbool(b'storage',
745 b'revlog.optimize-delta-parent-choice')
752 b'revlog.optimize-delta-parent-choice')
746 options[b'deltabothparents'] = deltabothparents
753 options[b'deltabothparents'] = deltabothparents
747
754
748 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
755 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
749
756
750 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
757 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
751 if 0 <= chainspan:
758 if 0 <= chainspan:
752 options[b'maxdeltachainspan'] = chainspan
759 options[b'maxdeltachainspan'] = chainspan
753
760
754 mmapindexthreshold = ui.configbytes(b'storage', b'mmap-threshold')
761 mmapindexthreshold = ui.configbytes(b'storage', b'mmap-threshold')
755 if mmapindexthreshold is not None:
762 if mmapindexthreshold is not None:
756 options[b'mmapindexthreshold'] = mmapindexthreshold
763 options[b'mmapindexthreshold'] = mmapindexthreshold
757
764
758 withsparseread = ui.configbool(b'experimental', b'sparse-read')
765 withsparseread = ui.configbool(b'experimental', b'sparse-read')
759 srdensitythres = float(ui.config(b'experimental',
766 srdensitythres = float(ui.config(b'experimental',
760 b'sparse-read.density-threshold'))
767 b'sparse-read.density-threshold'))
761 srmingapsize = ui.configbytes(b'experimental',
768 srmingapsize = ui.configbytes(b'experimental',
762 b'sparse-read.min-gap-size')
769 b'sparse-read.min-gap-size')
763 options[b'with-sparse-read'] = withsparseread
770 options[b'with-sparse-read'] = withsparseread
764 options[b'sparse-read-density-threshold'] = srdensitythres
771 options[b'sparse-read-density-threshold'] = srdensitythres
765 options[b'sparse-read-min-gap-size'] = srmingapsize
772 options[b'sparse-read-min-gap-size'] = srmingapsize
766
773
767 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
774 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
768 options[b'sparse-revlog'] = sparserevlog
775 options[b'sparse-revlog'] = sparserevlog
769 if sparserevlog:
776 if sparserevlog:
770 options[b'generaldelta'] = True
777 options[b'generaldelta'] = True
771
778
772 maxchainlen = None
779 maxchainlen = None
773 if sparserevlog:
780 if sparserevlog:
774 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
781 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
775 # experimental config: format.maxchainlen
782 # experimental config: format.maxchainlen
776 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
783 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
777 if maxchainlen is not None:
784 if maxchainlen is not None:
778 options[b'maxchainlen'] = maxchainlen
785 options[b'maxchainlen'] = maxchainlen
779
786
780 for r in requirements:
787 for r in requirements:
781 if r.startswith(b'exp-compression-'):
788 if r.startswith(b'exp-compression-'):
782 options[b'compengine'] = r[len(b'exp-compression-'):]
789 options[b'compengine'] = r[len(b'exp-compression-'):]
783
790
784 if repository.NARROW_REQUIREMENT in requirements:
791 if repository.NARROW_REQUIREMENT in requirements:
785 options[b'enableellipsis'] = True
792 options[b'enableellipsis'] = True
786
793
787 return options
794 return options
788
795
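# For reference, the config knobs consulted above and the vfs options they
# feed (a summary of the code, not an exhaustive contract):
#   [format]       chunkcachesize -> 'chunkcachesize'
#                  maxchainlen -> 'maxchainlen'
#   [storage]      revlog.optimize-delta-parent-choice -> 'deltabothparents'
#                  mmap-threshold -> 'mmapindexthreshold'
#   [experimental] maxdeltachainspan -> 'maxdeltachainspan'
#                  sparse-read, sparse-read.density-threshold and
#                  sparse-read.min-gap-size -> the 'with-sparse-read',
#                  'sparse-read-density-threshold' and
#                  'sparse-read-min-gap-size' options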
789 def makemain(**kwargs):
796 def makemain(**kwargs):
790 """Produce a type conforming to ``ilocalrepositorymain``."""
797 """Produce a type conforming to ``ilocalrepositorymain``."""
791 return localrepository
798 return localrepository
792
799
793 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
800 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
794 class revlogfilestorage(object):
801 class revlogfilestorage(object):
795 """File storage when using revlogs."""
802 """File storage when using revlogs."""
796
803
797 def file(self, path):
804 def file(self, path):
798 if path[0] == b'/':
805 if path[0] == b'/':
799 path = path[1:]
806 path = path[1:]
800
807
801 return filelog.filelog(self.svfs, path)
808 return filelog.filelog(self.svfs, path)
802
809
803 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
810 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
804 class revlognarrowfilestorage(object):
811 class revlognarrowfilestorage(object):
805 """File storage when using revlogs and narrow files."""
812 """File storage when using revlogs and narrow files."""
806
813
807 def file(self, path):
814 def file(self, path):
808 if path[0] == b'/':
815 if path[0] == b'/':
809 path = path[1:]
816 path = path[1:]
810
817
811 return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
818 return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
812
819
813 def makefilestorage(requirements, features, **kwargs):
820 def makefilestorage(requirements, features, **kwargs):
814 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
821 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
815 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
822 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
816 features.add(repository.REPO_FEATURE_STREAM_CLONE)
823 features.add(repository.REPO_FEATURE_STREAM_CLONE)
817
824
818 if repository.NARROW_REQUIREMENT in requirements:
825 if repository.NARROW_REQUIREMENT in requirements:
819 return revlognarrowfilestorage
826 return revlognarrowfilestorage
820 else:
827 else:
821 return revlogfilestorage
828 return revlogfilestorage
822
829
823 # List of repository interfaces and factory functions for them. Each
830 # List of repository interfaces and factory functions for them. Each
824 # will be called in order during ``makelocalrepository()`` to iteratively
831 # will be called in order during ``makelocalrepository()`` to iteratively
825 # derive the final type for a local repository instance. We capture the
832 # derive the final type for a local repository instance. We capture the
826 # function as a lambda so we don't hold a reference and the module-level
833 # function as a lambda so we don't hold a reference and the module-level
827 # functions can be wrapped.
834 # functions can be wrapped.
828 REPO_INTERFACES = [
835 REPO_INTERFACES = [
829 (repository.ilocalrepositorymain, lambda: makemain),
836 (repository.ilocalrepositorymain, lambda: makemain),
830 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
837 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
831 ]
838 ]
832
839
833 @interfaceutil.implementer(repository.ilocalrepositorymain)
840 @interfaceutil.implementer(repository.ilocalrepositorymain)
834 class localrepository(object):
841 class localrepository(object):
835 """Main class for representing local repositories.
842 """Main class for representing local repositories.
836
843
837 All local repositories are instances of this class.
844 All local repositories are instances of this class.
838
845
839 Constructed on its own, instances of this class are not usable as
846 Constructed on its own, instances of this class are not usable as
840 repository objects. To obtain a usable repository object, call
847 repository objects. To obtain a usable repository object, call
841 ``hg.repository()``, ``localrepo.instance()``, or
848 ``hg.repository()``, ``localrepo.instance()``, or
842 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
849 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
843 ``instance()`` adds support for creating new repositories.
850 ``instance()`` adds support for creating new repositories.
844 ``hg.repository()`` adds more extension integration, including calling
851 ``hg.repository()`` adds more extension integration, including calling
845 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
852 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
846 used.
853 used.
847 """
854 """
848
855
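As a usage sketch of the docstring above, the recommended entry point looks like this (the repository path is a placeholder):

from mercurial import hg, ui as uimod

ui = uimod.ui.load()                        # load system/user config
repo = hg.repository(ui, b'/path/to/repo')  # placeholder path
print(len(repo))                            # number of changesets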
849 # obsolete experimental requirements:
856 # obsolete experimental requirements:
850 # - manifestv2: An experimental new manifest format that allowed
857 # - manifestv2: An experimental new manifest format that allowed
851 # for stem compression of long paths. The experiment ended up not
858 # for stem compression of long paths. The experiment ended up not
852 # being successful (repository sizes went up due to worse delta
859 # being successful (repository sizes went up due to worse delta
853 # chains), and the code was deleted in 4.6.
860 # chains), and the code was deleted in 4.6.
854 supportedformats = {
861 supportedformats = {
855 'revlogv1',
862 'revlogv1',
856 'generaldelta',
863 'generaldelta',
857 'treemanifest',
864 'treemanifest',
858 REVLOGV2_REQUIREMENT,
865 REVLOGV2_REQUIREMENT,
859 SPARSEREVLOG_REQUIREMENT,
866 SPARSEREVLOG_REQUIREMENT,
860 }
867 }
861 _basesupported = supportedformats | {
868 _basesupported = supportedformats | {
862 'store',
869 'store',
863 'fncache',
870 'fncache',
864 'shared',
871 'shared',
865 'relshared',
872 'relshared',
866 'dotencode',
873 'dotencode',
867 'exp-sparse',
874 'exp-sparse',
868 'internal-phase'
875 'internal-phase'
869 }
876 }
870
877
871 # list of prefixes for files which can be written without 'wlock'
878 # list of prefixes for files which can be written without 'wlock'
872 # Extensions should extend this list when needed
879 # Extensions should extend this list when needed
873 _wlockfreeprefix = {
880 _wlockfreeprefix = {
874 # We might consider requiring 'wlock' for the next
881 # We might consider requiring 'wlock' for the next
875 # two, but pretty much all the existing code assumes
882 # two, but pretty much all the existing code assumes
876 # wlock is not needed so we keep them excluded for
883 # wlock is not needed so we keep them excluded for
877 # now.
884 # now.
878 'hgrc',
885 'hgrc',
879 'requires',
886 'requires',
880 # XXX cache is a complicated business; someone
887 # XXX cache is a complicated business; someone
881 # should investigate this in depth at some point
888 # should investigate this in depth at some point
882 'cache/',
889 'cache/',
883 # XXX shouldn't dirstate be covered by the wlock?
890 # XXX shouldn't dirstate be covered by the wlock?
884 'dirstate',
891 'dirstate',
885 # XXX bisect was still a bit too messy at the time
892 # XXX bisect was still a bit too messy at the time
886 # this changeset was introduced. Someone should fix
893 # this changeset was introduced. Someone should fix
887 # the remaining bit and drop this line
894 # the remaining bit and drop this line
888 'bisect.state',
895 'bisect.state',
889 }
896 }
890
897
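As the comment above says, extensions extend this set rather than replace it; a hedged sketch with a hypothetical state file name:

from mercurial import localrepo

# Allow a hypothetical extension state file to be written without the
# wlock; 'myext.state' is an illustrative name, not a real file.
localrepo.localrepository._wlockfreeprefix.add(b'myext.state')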
891 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
898 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
892 supportedrequirements, sharedpath, store, cachevfs,
899 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
893 features, intents=None):
900 features, intents=None):
894 """Create a new local repository instance.
901 """Create a new local repository instance.
895
902
896 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
903 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
897 or ``localrepo.makelocalrepository()`` for obtaining a new repository
904 or ``localrepo.makelocalrepository()`` for obtaining a new repository
898 object.
905 object.
899
906
900 Arguments:
907 Arguments:
901
908
902 baseui
909 baseui
903 ``ui.ui`` instance that ``ui`` argument was based off of.
910 ``ui.ui`` instance that ``ui`` argument was based off of.
904
911
905 ui
912 ui
906 ``ui.ui`` instance for use by the repository.
913 ``ui.ui`` instance for use by the repository.
907
914
908 origroot
915 origroot
909 ``bytes`` path to working directory root of this repository.
916 ``bytes`` path to working directory root of this repository.
910
917
911 wdirvfs
918 wdirvfs
912 ``vfs.vfs`` rooted at the working directory.
919 ``vfs.vfs`` rooted at the working directory.
913
920
914 hgvfs
921 hgvfs
915 ``vfs.vfs`` rooted at .hg/
922 ``vfs.vfs`` rooted at .hg/
916
923
917 requirements
924 requirements
918 ``set`` of bytestrings representing repository opening requirements.
925 ``set`` of bytestrings representing repository opening requirements.
919
926
920 supportedrequirements
927 supportedrequirements
921 ``set`` of bytestrings representing repository requirements that we
928 ``set`` of bytestrings representing repository requirements that we
922 know how to open. May be a superset of ``requirements``.
929 know how to open. May be a superset of ``requirements``.
923
930
924 sharedpath
931 sharedpath
925 ``bytes`` path defining the storage base directory. Points to a
932 ``bytes`` path defining the storage base directory. Points to a
926 ``.hg/`` directory somewhere.
933 ``.hg/`` directory somewhere.
927
934
928 store
935 store
929 ``store.basicstore`` (or derived) instance providing access to
936 ``store.basicstore`` (or derived) instance providing access to
930 versioned storage.
937 versioned storage.
931
938
932 cachevfs
939 cachevfs
933 ``vfs.vfs`` used for cache files.
940 ``vfs.vfs`` used for cache files.
934
941
942 wcachevfs
943 ``vfs.vfs`` used for cache files related to the working copy.
944
935 features
945 features
936 ``set`` of bytestrings defining features/capabilities of this
946 ``set`` of bytestrings defining features/capabilities of this
937 instance.
947 instance.
938
948
939 intents
949 intents
940 ``set`` of system strings indicating what this repo will be used
950 ``set`` of system strings indicating what this repo will be used
941 for.
951 for.
942 """
952 """
943 self.baseui = baseui
953 self.baseui = baseui
944 self.ui = ui
954 self.ui = ui
945 self.origroot = origroot
955 self.origroot = origroot
946 # vfs rooted at working directory.
956 # vfs rooted at working directory.
947 self.wvfs = wdirvfs
957 self.wvfs = wdirvfs
948 self.root = wdirvfs.base
958 self.root = wdirvfs.base
949 # vfs rooted at .hg/. Used to access most non-store paths.
959 # vfs rooted at .hg/. Used to access most non-store paths.
950 self.vfs = hgvfs
960 self.vfs = hgvfs
951 self.path = hgvfs.base
961 self.path = hgvfs.base
952 self.requirements = requirements
962 self.requirements = requirements
953 self.supported = supportedrequirements
963 self.supported = supportedrequirements
954 self.sharedpath = sharedpath
964 self.sharedpath = sharedpath
955 self.store = store
965 self.store = store
956 self.cachevfs = cachevfs
966 self.cachevfs = cachevfs
967 self.wcachevfs = wcachevfs
957 self.features = features
968 self.features = features
958
969
959 self.filtername = None
970 self.filtername = None
960
971
961 if (self.ui.configbool('devel', 'all-warnings') or
972 if (self.ui.configbool('devel', 'all-warnings') or
962 self.ui.configbool('devel', 'check-locks')):
973 self.ui.configbool('devel', 'check-locks')):
963 self.vfs.audit = self._getvfsward(self.vfs.audit)
974 self.vfs.audit = self._getvfsward(self.vfs.audit)
964 # A list of callbacks to shape the phase if no data were found.
975 # A list of callbacks to shape the phase if no data were found.
965 # Callbacks are in the form: func(repo, roots) --> processed root.
976 # Callbacks are in the form: func(repo, roots) --> processed root.
966 # This list is to be filled by extensions during repo setup
977 # This list is to be filled by extensions during repo setup
967 self._phasedefaults = []
978 self._phasedefaults = []
968
979
969 color.setup(self.ui)
980 color.setup(self.ui)
970
981
971 self.spath = self.store.path
982 self.spath = self.store.path
972 self.svfs = self.store.vfs
983 self.svfs = self.store.vfs
973 self.sjoin = self.store.join
984 self.sjoin = self.store.join
974 if (self.ui.configbool('devel', 'all-warnings') or
985 if (self.ui.configbool('devel', 'all-warnings') or
975 self.ui.configbool('devel', 'check-locks')):
986 self.ui.configbool('devel', 'check-locks')):
976 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
987 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
977 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
988 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
978 else: # standard vfs
989 else: # standard vfs
979 self.svfs.audit = self._getsvfsward(self.svfs.audit)
990 self.svfs.audit = self._getsvfsward(self.svfs.audit)
980
991
981 self._dirstatevalidatewarned = False
992 self._dirstatevalidatewarned = False
982
993
983 self._branchcaches = {}
994 self._branchcaches = {}
984 self._revbranchcache = None
995 self._revbranchcache = None
985 self._filterpats = {}
996 self._filterpats = {}
986 self._datafilters = {}
997 self._datafilters = {}
987 self._transref = self._lockref = self._wlockref = None
998 self._transref = self._lockref = self._wlockref = None
988
999
989 # A cache for various files under .hg/ that tracks file changes
1000 # A cache for various files under .hg/ that tracks file changes
990 # (used by the filecache decorator)
1001 # (used by the filecache decorator)
991 #
1002 #
992 # Maps a property name to its util.filecacheentry
1003 # Maps a property name to its util.filecacheentry
993 self._filecache = {}
1004 self._filecache = {}
994
1005
995 # holds sets of revisions to be filtered
1006 # holds sets of revisions to be filtered
996 # should be cleared when something might have changed the filter value:
1007 # should be cleared when something might have changed the filter value:
997 # - new changesets,
1008 # - new changesets,
998 # - phase change,
1009 # - phase change,
999 # - new obsolescence marker,
1010 # - new obsolescence marker,
1000 # - working directory parent change,
1011 # - working directory parent change,
1001 # - bookmark changes
1012 # - bookmark changes
1002 self.filteredrevcache = {}
1013 self.filteredrevcache = {}
1003
1014
1004 # post-dirstate-status hooks
1015 # post-dirstate-status hooks
1005 self._postdsstatus = []
1016 self._postdsstatus = []
1006
1017
1007 # generic mapping between names and nodes
1018 # generic mapping between names and nodes
1008 self.names = namespaces.namespaces()
1019 self.names = namespaces.namespaces()
1009
1020
1010 # Key to signature value.
1021 # Key to signature value.
1011 self._sparsesignaturecache = {}
1022 self._sparsesignaturecache = {}
1012 # Signature to cached matcher instance.
1023 # Signature to cached matcher instance.
1013 self._sparsematchercache = {}
1024 self._sparsematchercache = {}
1014
1025
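The ``wcachevfs`` attribute assigned above is the point of this change; it is used like any other vfs. A minimal sketch (``repo`` obtained as in the earlier example; the cache file name is hypothetical):

# Round-trip a cache file under .hg/wcache/ through the new vfs.
repo.wcachevfs.write(b'myext-wc-state', b'cached bytes')  # hypothetical name
if repo.wcachevfs.exists(b'myext-wc-state'):
    data = repo.wcachevfs.read(b'myext-wc-state')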
1015 def _getvfsward(self, origfunc):
1026 def _getvfsward(self, origfunc):
1016 """build a ward for self.vfs"""
1027 """build a ward for self.vfs"""
1017 rref = weakref.ref(self)
1028 rref = weakref.ref(self)
1018 def checkvfs(path, mode=None):
1029 def checkvfs(path, mode=None):
1019 ret = origfunc(path, mode=mode)
1030 ret = origfunc(path, mode=mode)
1020 repo = rref()
1031 repo = rref()
1021 if (repo is None
1032 if (repo is None
1022 or not util.safehasattr(repo, '_wlockref')
1033 or not util.safehasattr(repo, '_wlockref')
1023 or not util.safehasattr(repo, '_lockref')):
1034 or not util.safehasattr(repo, '_lockref')):
1024 return
1035 return
1025 if mode in (None, 'r', 'rb'):
1036 if mode in (None, 'r', 'rb'):
1026 return
1037 return
1027 if path.startswith(repo.path):
1038 if path.startswith(repo.path):
1028 # truncate name relative to the repository (.hg)
1039 # truncate name relative to the repository (.hg)
1029 path = path[len(repo.path) + 1:]
1040 path = path[len(repo.path) + 1:]
1030 if path.startswith('cache/'):
1041 if path.startswith('cache/'):
1031 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1042 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1032 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1043 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1033 if path.startswith('journal.') or path.startswith('undo.'):
1044 if path.startswith('journal.') or path.startswith('undo.'):
1034 # journal is covered by 'lock'
1045 # journal is covered by 'lock'
1035 if repo._currentlock(repo._lockref) is None:
1046 if repo._currentlock(repo._lockref) is None:
1036 repo.ui.develwarn('write with no lock: "%s"' % path,
1047 repo.ui.develwarn('write with no lock: "%s"' % path,
1037 stacklevel=3, config='check-locks')
1048 stacklevel=3, config='check-locks')
1038 elif repo._currentlock(repo._wlockref) is None:
1049 elif repo._currentlock(repo._wlockref) is None:
1039 # rest of vfs files are covered by 'wlock'
1050 # rest of vfs files are covered by 'wlock'
1040 #
1051 #
1041 # exclude special files
1052 # exclude special files
1042 for prefix in self._wlockfreeprefix:
1053 for prefix in self._wlockfreeprefix:
1043 if path.startswith(prefix):
1054 if path.startswith(prefix):
1044 return
1055 return
1045 repo.ui.develwarn('write with no wlock: "%s"' % path,
1056 repo.ui.develwarn('write with no wlock: "%s"' % path,
1046 stacklevel=3, config='check-locks')
1057 stacklevel=3, config='check-locks')
1047 return ret
1058 return ret
1048 return checkvfs
1059 return checkvfs
1049
1060
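The ward above is only installed when the relevant ``devel`` options are set at construction time, so the configuration must be in place before the repository object is created. For example:

from mercurial import hg, ui as uimod

ui = uimod.ui.load()
# Equivalent to [devel] check-locks = yes in an hgrc; must be set before
# hg.repository() runs, since the ward is installed in __init__.
ui.setconfig(b'devel', b'check-locks', b'yes')
repo = hg.repository(ui, b'/path/to/repo')  # placeholder path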
1050 def _getsvfsward(self, origfunc):
1061 def _getsvfsward(self, origfunc):
1051 """build a ward for self.svfs"""
1062 """build a ward for self.svfs"""
1052 rref = weakref.ref(self)
1063 rref = weakref.ref(self)
1053 def checksvfs(path, mode=None):
1064 def checksvfs(path, mode=None):
1054 ret = origfunc(path, mode=mode)
1065 ret = origfunc(path, mode=mode)
1055 repo = rref()
1066 repo = rref()
1056 if repo is None or not util.safehasattr(repo, '_lockref'):
1067 if repo is None or not util.safehasattr(repo, '_lockref'):
1057 return
1068 return
1058 if mode in (None, 'r', 'rb'):
1069 if mode in (None, 'r', 'rb'):
1059 return
1070 return
1060 if path.startswith(repo.sharedpath):
1071 if path.startswith(repo.sharedpath):
1061 # truncate name relative to the repository (.hg)
1072 # truncate name relative to the repository (.hg)
1062 path = path[len(repo.sharedpath) + 1:]
1073 path = path[len(repo.sharedpath) + 1:]
1063 if repo._currentlock(repo._lockref) is None:
1074 if repo._currentlock(repo._lockref) is None:
1064 repo.ui.develwarn('write with no lock: "%s"' % path,
1075 repo.ui.develwarn('write with no lock: "%s"' % path,
1065 stacklevel=4)
1076 stacklevel=4)
1066 return ret
1077 return ret
1067 return checksvfs
1078 return checksvfs
1068
1079
1069 def close(self):
1080 def close(self):
1070 self._writecaches()
1081 self._writecaches()
1071
1082
1072 def _writecaches(self):
1083 def _writecaches(self):
1073 if self._revbranchcache:
1084 if self._revbranchcache:
1074 self._revbranchcache.write()
1085 self._revbranchcache.write()
1075
1086
1076 def _restrictcapabilities(self, caps):
1087 def _restrictcapabilities(self, caps):
1077 if self.ui.configbool('experimental', 'bundle2-advertise'):
1088 if self.ui.configbool('experimental', 'bundle2-advertise'):
1078 caps = set(caps)
1089 caps = set(caps)
1079 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1090 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1080 role='client'))
1091 role='client'))
1081 caps.add('bundle2=' + urlreq.quote(capsblob))
1092 caps.add('bundle2=' + urlreq.quote(capsblob))
1082 return caps
1093 return caps
1083
1094
1084 def _writerequirements(self):
1095 def _writerequirements(self):
1085 scmutil.writerequires(self.vfs, self.requirements)
1096 scmutil.writerequires(self.vfs, self.requirements)
1086
1097
1087 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1098 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1088 # self -> auditor -> self._checknested -> self
1099 # self -> auditor -> self._checknested -> self
1089
1100
1090 @property
1101 @property
1091 def auditor(self):
1102 def auditor(self):
1092 # This is only used by context.workingctx.match in order to
1103 # This is only used by context.workingctx.match in order to
1093 # detect files in subrepos.
1104 # detect files in subrepos.
1094 return pathutil.pathauditor(self.root, callback=self._checknested)
1105 return pathutil.pathauditor(self.root, callback=self._checknested)
1095
1106
1096 @property
1107 @property
1097 def nofsauditor(self):
1108 def nofsauditor(self):
1098 # This is only used by context.basectx.match in order to detect
1109 # This is only used by context.basectx.match in order to detect
1099 # files in subrepos.
1110 # files in subrepos.
1100 return pathutil.pathauditor(self.root, callback=self._checknested,
1111 return pathutil.pathauditor(self.root, callback=self._checknested,
1101 realfs=False, cached=True)
1112 realfs=False, cached=True)
1102
1113
1103 def _checknested(self, path):
1114 def _checknested(self, path):
1104 """Determine if path is a legal nested repository."""
1115 """Determine if path is a legal nested repository."""
1105 if not path.startswith(self.root):
1116 if not path.startswith(self.root):
1106 return False
1117 return False
1107 subpath = path[len(self.root) + 1:]
1118 subpath = path[len(self.root) + 1:]
1108 normsubpath = util.pconvert(subpath)
1119 normsubpath = util.pconvert(subpath)
1109
1120
1110 # XXX: Checking against the current working copy is wrong in
1121 # XXX: Checking against the current working copy is wrong in
1111 # the sense that it can reject things like
1122 # the sense that it can reject things like
1112 #
1123 #
1113 # $ hg cat -r 10 sub/x.txt
1124 # $ hg cat -r 10 sub/x.txt
1114 #
1125 #
1115 # if sub/ is no longer a subrepository in the working copy
1126 # if sub/ is no longer a subrepository in the working copy
1116 # parent revision.
1127 # parent revision.
1117 #
1128 #
1118 # However, it can of course also allow things that would have
1129 # However, it can of course also allow things that would have
1119 # been rejected before, such as the above cat command if sub/
1130 # been rejected before, such as the above cat command if sub/
1120 # is a subrepository now, but was a normal directory before.
1131 # is a subrepository now, but was a normal directory before.
1121 # The old path auditor would have rejected by mistake since it
1132 # The old path auditor would have rejected by mistake since it
1122 # panics when it sees sub/.hg/.
1133 # panics when it sees sub/.hg/.
1123 #
1134 #
1124 # All in all, checking against the working copy seems sensible
1135 # All in all, checking against the working copy seems sensible
1125 # since we want to prevent access to nested repositories on
1136 # since we want to prevent access to nested repositories on
1126 # the filesystem *now*.
1137 # the filesystem *now*.
1127 ctx = self[None]
1138 ctx = self[None]
1128 parts = util.splitpath(subpath)
1139 parts = util.splitpath(subpath)
1129 while parts:
1140 while parts:
1130 prefix = '/'.join(parts)
1141 prefix = '/'.join(parts)
1131 if prefix in ctx.substate:
1142 if prefix in ctx.substate:
1132 if prefix == normsubpath:
1143 if prefix == normsubpath:
1133 return True
1144 return True
1134 else:
1145 else:
1135 sub = ctx.sub(prefix)
1146 sub = ctx.sub(prefix)
1136 return sub.checknested(subpath[len(prefix) + 1:])
1147 return sub.checknested(subpath[len(prefix) + 1:])
1137 else:
1148 else:
1138 parts.pop()
1149 parts.pop()
1139 return False
1150 return False
1140
1151
1141 def peer(self):
1152 def peer(self):
1142 return localpeer(self) # not cached to avoid reference cycle
1153 return localpeer(self) # not cached to avoid reference cycle
1143
1154
1144 def unfiltered(self):
1155 def unfiltered(self):
1145 """Return unfiltered version of the repository
1156 """Return unfiltered version of the repository
1146
1157
1147 Intended to be overwritten by filtered repo."""
1158 Intended to be overwritten by filtered repo."""
1148 return self
1159 return self
1149
1160
1150 def filtered(self, name, visibilityexceptions=None):
1161 def filtered(self, name, visibilityexceptions=None):
1151 """Return a filtered version of a repository"""
1162 """Return a filtered version of a repository"""
1152 cls = repoview.newtype(self.unfiltered().__class__)
1163 cls = repoview.newtype(self.unfiltered().__class__)
1153 return cls(self, name, visibilityexceptions)
1164 return cls(self, name, visibilityexceptions)
1154
1165
1155 @repofilecache('bookmarks', 'bookmarks.current')
1166 @repofilecache('bookmarks', 'bookmarks.current')
1156 def _bookmarks(self):
1167 def _bookmarks(self):
1157 return bookmarks.bmstore(self)
1168 return bookmarks.bmstore(self)
1158
1169
1159 @property
1170 @property
1160 def _activebookmark(self):
1171 def _activebookmark(self):
1161 return self._bookmarks.active
1172 return self._bookmarks.active
1162
1173
1163 # _phasesets depend on changelog. What we need is to call
1174 # _phasesets depend on changelog. What we need is to call
1164 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1175 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1165 # can't be easily expressed in the filecache mechanism.
1176 # can't be easily expressed in the filecache mechanism.
1166 @storecache('phaseroots', '00changelog.i')
1177 @storecache('phaseroots', '00changelog.i')
1167 def _phasecache(self):
1178 def _phasecache(self):
1168 return phases.phasecache(self, self._phasedefaults)
1179 return phases.phasecache(self, self._phasedefaults)
1169
1180
1170 @storecache('obsstore')
1181 @storecache('obsstore')
1171 def obsstore(self):
1182 def obsstore(self):
1172 return obsolete.makestore(self.ui, self)
1183 return obsolete.makestore(self.ui, self)
1173
1184
1174 @storecache('00changelog.i')
1185 @storecache('00changelog.i')
1175 def changelog(self):
1186 def changelog(self):
1176 return changelog.changelog(self.svfs,
1187 return changelog.changelog(self.svfs,
1177 trypending=txnutil.mayhavepending(self.root))
1188 trypending=txnutil.mayhavepending(self.root))
1178
1189
1179 @storecache('00manifest.i')
1190 @storecache('00manifest.i')
1180 def manifestlog(self):
1191 def manifestlog(self):
1181 rootstore = manifest.manifestrevlog(self.svfs)
1192 rootstore = manifest.manifestrevlog(self.svfs)
1182 return manifest.manifestlog(self.svfs, self, rootstore)
1193 return manifest.manifestlog(self.svfs, self, rootstore)
1183
1194
1184 @repofilecache('dirstate')
1195 @repofilecache('dirstate')
1185 def dirstate(self):
1196 def dirstate(self):
1186 return self._makedirstate()
1197 return self._makedirstate()
1187
1198
1188 def _makedirstate(self):
1199 def _makedirstate(self):
1189 """Extension point for wrapping the dirstate per-repo."""
1200 """Extension point for wrapping the dirstate per-repo."""
1190 sparsematchfn = lambda: sparse.matcher(self)
1201 sparsematchfn = lambda: sparse.matcher(self)
1191
1202
1192 return dirstate.dirstate(self.vfs, self.ui, self.root,
1203 return dirstate.dirstate(self.vfs, self.ui, self.root,
1193 self._dirstatevalidate, sparsematchfn)
1204 self._dirstatevalidate, sparsematchfn)
1194
1205
1195 def _dirstatevalidate(self, node):
1206 def _dirstatevalidate(self, node):
1196 try:
1207 try:
1197 self.changelog.rev(node)
1208 self.changelog.rev(node)
1198 return node
1209 return node
1199 except error.LookupError:
1210 except error.LookupError:
1200 if not self._dirstatevalidatewarned:
1211 if not self._dirstatevalidatewarned:
1201 self._dirstatevalidatewarned = True
1212 self._dirstatevalidatewarned = True
1202 self.ui.warn(_("warning: ignoring unknown"
1213 self.ui.warn(_("warning: ignoring unknown"
1203 " working parent %s!\n") % short(node))
1214 " working parent %s!\n") % short(node))
1204 return nullid
1215 return nullid
1205
1216
1206 @storecache(narrowspec.FILENAME)
1217 @storecache(narrowspec.FILENAME)
1207 def narrowpats(self):
1218 def narrowpats(self):
1208 """matcher patterns for this repository's narrowspec
1219 """matcher patterns for this repository's narrowspec
1209
1220
1210 A tuple of (includes, excludes).
1221 A tuple of (includes, excludes).
1211 """
1222 """
1212 return narrowspec.load(self)
1223 return narrowspec.load(self)
1213
1224
1214 @storecache(narrowspec.FILENAME)
1225 @storecache(narrowspec.FILENAME)
1215 def _narrowmatch(self):
1226 def _narrowmatch(self):
1216 if repository.NARROW_REQUIREMENT not in self.requirements:
1227 if repository.NARROW_REQUIREMENT not in self.requirements:
1217 return matchmod.always(self.root, '')
1228 return matchmod.always(self.root, '')
1218 include, exclude = self.narrowpats
1229 include, exclude = self.narrowpats
1219 return narrowspec.match(self.root, include=include, exclude=exclude)
1230 return narrowspec.match(self.root, include=include, exclude=exclude)
1220
1231
1221 def narrowmatch(self, match=None, includeexact=False):
1232 def narrowmatch(self, match=None, includeexact=False):
1222 """matcher corresponding the the repo's narrowspec
1233 """matcher corresponding the the repo's narrowspec
1223
1234
1224 If `match` is given, then that will be intersected with the narrow
1235 If `match` is given, then that will be intersected with the narrow
1225 matcher.
1236 matcher.
1226
1237
1227 If `includeexact` is True, then any exact matches from `match` will
1238 If `includeexact` is True, then any exact matches from `match` will
1228 be included even if they're outside the narrowspec.
1239 be included even if they're outside the narrowspec.
1229 """
1240 """
1230 if match:
1241 if match:
1231 if includeexact and not self._narrowmatch.always():
1242 if includeexact and not self._narrowmatch.always():
1232 # do not exclude explicitly-specified paths so that they can
1243 # do not exclude explicitly-specified paths so that they can
1233 # be warned later on
1244 # be warned later on
1234 em = matchmod.exact(match._root, match._cwd, match.files())
1245 em = matchmod.exact(match._root, match._cwd, match.files())
1235 nm = matchmod.unionmatcher([self._narrowmatch, em])
1246 nm = matchmod.unionmatcher([self._narrowmatch, em])
1236 return matchmod.intersectmatchers(match, nm)
1247 return matchmod.intersectmatchers(match, nm)
1237 return matchmod.intersectmatchers(match, self._narrowmatch)
1248 return matchmod.intersectmatchers(match, self._narrowmatch)
1238 return self._narrowmatch
1249 return self._narrowmatch
1239
1250
1240 def setnarrowpats(self, newincludes, newexcludes):
1251 def setnarrowpats(self, newincludes, newexcludes):
1241 narrowspec.save(self, newincludes, newexcludes)
1252 narrowspec.save(self, newincludes, newexcludes)
1242 self.invalidate(clearfilecache=True)
1253 self.invalidate(clearfilecache=True)
1243
1254
1244 def __getitem__(self, changeid):
1255 def __getitem__(self, changeid):
1245 if changeid is None:
1256 if changeid is None:
1246 return context.workingctx(self)
1257 return context.workingctx(self)
1247 if isinstance(changeid, context.basectx):
1258 if isinstance(changeid, context.basectx):
1248 return changeid
1259 return changeid
1249 if isinstance(changeid, slice):
1260 if isinstance(changeid, slice):
1250 # wdirrev isn't contiguous so the slice shouldn't include it
1261 # wdirrev isn't contiguous so the slice shouldn't include it
1251 return [self[i]
1262 return [self[i]
1252 for i in pycompat.xrange(*changeid.indices(len(self)))
1263 for i in pycompat.xrange(*changeid.indices(len(self)))
1253 if i not in self.changelog.filteredrevs]
1264 if i not in self.changelog.filteredrevs]
1254 try:
1265 try:
1255 if isinstance(changeid, int):
1266 if isinstance(changeid, int):
1256 node = self.changelog.node(changeid)
1267 node = self.changelog.node(changeid)
1257 rev = changeid
1268 rev = changeid
1258 elif changeid == 'null':
1269 elif changeid == 'null':
1259 node = nullid
1270 node = nullid
1260 rev = nullrev
1271 rev = nullrev
1261 elif changeid == 'tip':
1272 elif changeid == 'tip':
1262 node = self.changelog.tip()
1273 node = self.changelog.tip()
1263 rev = self.changelog.rev(node)
1274 rev = self.changelog.rev(node)
1264 elif changeid == '.':
1275 elif changeid == '.':
1265 # this is a hack to delay/avoid loading obsmarkers
1276 # this is a hack to delay/avoid loading obsmarkers
1266 # when we know that '.' won't be hidden
1277 # when we know that '.' won't be hidden
1267 node = self.dirstate.p1()
1278 node = self.dirstate.p1()
1268 rev = self.unfiltered().changelog.rev(node)
1279 rev = self.unfiltered().changelog.rev(node)
1269 elif len(changeid) == 20:
1280 elif len(changeid) == 20:
1270 try:
1281 try:
1271 node = changeid
1282 node = changeid
1272 rev = self.changelog.rev(changeid)
1283 rev = self.changelog.rev(changeid)
1273 except error.FilteredLookupError:
1284 except error.FilteredLookupError:
1274 changeid = hex(changeid) # for the error message
1285 changeid = hex(changeid) # for the error message
1275 raise
1286 raise
1276 except LookupError:
1287 except LookupError:
1277 # check if it might have come from damaged dirstate
1288 # check if it might have come from damaged dirstate
1278 #
1289 #
1279 # XXX we could avoid the unfiltered if we had a recognizable
1290 # XXX we could avoid the unfiltered if we had a recognizable
1280 # exception for filtered changeset access
1291 # exception for filtered changeset access
1281 if (self.local()
1292 if (self.local()
1282 and changeid in self.unfiltered().dirstate.parents()):
1293 and changeid in self.unfiltered().dirstate.parents()):
1283 msg = _("working directory has unknown parent '%s'!")
1294 msg = _("working directory has unknown parent '%s'!")
1284 raise error.Abort(msg % short(changeid))
1295 raise error.Abort(msg % short(changeid))
1285 changeid = hex(changeid) # for the error message
1296 changeid = hex(changeid) # for the error message
1286 raise
1297 raise
1287
1298
1288 elif len(changeid) == 40:
1299 elif len(changeid) == 40:
1289 node = bin(changeid)
1300 node = bin(changeid)
1290 rev = self.changelog.rev(node)
1301 rev = self.changelog.rev(node)
1291 else:
1302 else:
1292 raise error.ProgrammingError(
1303 raise error.ProgrammingError(
1293 "unsupported changeid '%s' of type %s" %
1304 "unsupported changeid '%s' of type %s" %
1294 (changeid, type(changeid)))
1305 (changeid, type(changeid)))
1295
1306
1296 return context.changectx(self, rev, node)
1307 return context.changectx(self, rev, node)
1297
1308
1298 except (error.FilteredIndexError, error.FilteredLookupError):
1309 except (error.FilteredIndexError, error.FilteredLookupError):
1299 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1310 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1300 % pycompat.bytestr(changeid))
1311 % pycompat.bytestr(changeid))
1301 except (IndexError, LookupError):
1312 except (IndexError, LookupError):
1302 raise error.RepoLookupError(
1313 raise error.RepoLookupError(
1303 _("unknown revision '%s'") % pycompat.bytestr(changeid))
1314 _("unknown revision '%s'") % pycompat.bytestr(changeid))
1304 except error.WdirUnsupported:
1315 except error.WdirUnsupported:
1305 return context.workingctx(self)
1316 return context.workingctx(self)
1306
1317
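A few representative lookups accepted by ``__getitem__`` above (``repo`` as in the earlier example):

ctx = repo[b'tip']     # symbolic name
ctx = repo[0]          # integer revision number
wctx = repo[None]      # working directory context
parent = repo[b'.']    # working directory parent
subset = repo[0:5]     # slice -> list of changectx, filtered revs skipped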
1307 def __contains__(self, changeid):
1318 def __contains__(self, changeid):
1308 """True if the given changeid exists
1319 """True if the given changeid exists
1309
1320
1310 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1321 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1311 is specified.
1322 is specified.
1312 """
1323 """
1313 try:
1324 try:
1314 self[changeid]
1325 self[changeid]
1315 return True
1326 return True
1316 except error.RepoLookupError:
1327 except error.RepoLookupError:
1317 return False
1328 return False
1318
1329
1319 def __nonzero__(self):
1330 def __nonzero__(self):
1320 return True
1331 return True
1321
1332
1322 __bool__ = __nonzero__
1333 __bool__ = __nonzero__
1323
1334
1324 def __len__(self):
1335 def __len__(self):
1325 # no need to pay the cost of repoview.changelog
1336 # no need to pay the cost of repoview.changelog
1326 unfi = self.unfiltered()
1337 unfi = self.unfiltered()
1327 return len(unfi.changelog)
1338 return len(unfi.changelog)
1328
1339
1329 def __iter__(self):
1340 def __iter__(self):
1330 return iter(self.changelog)
1341 return iter(self.changelog)
1331
1342
1332 def revs(self, expr, *args):
1343 def revs(self, expr, *args):
1333 '''Find revisions matching a revset.
1344 '''Find revisions matching a revset.
1334
1345
1335 The revset is specified as a string ``expr`` that may contain
1346 The revset is specified as a string ``expr`` that may contain
1336 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1347 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1337
1348
1338 Revset aliases from the configuration are not expanded. To expand
1349 Revset aliases from the configuration are not expanded. To expand
1339 user aliases, consider calling ``scmutil.revrange()`` or
1350 user aliases, consider calling ``scmutil.revrange()`` or
1340 ``repo.anyrevs([expr], user=True)``.
1351 ``repo.anyrevs([expr], user=True)``.
1341
1352
1342 Returns a revset.abstractsmartset, which is a list-like interface
1353 Returns a revset.abstractsmartset, which is a list-like interface
1343 that contains integer revisions.
1354 that contains integer revisions.
1344 '''
1355 '''
1345 expr = revsetlang.formatspec(expr, *args)
1356 expr = revsetlang.formatspec(expr, *args)
1346 m = revset.match(None, expr)
1357 m = revset.match(None, expr)
1347 return m(self)
1358 return m(self)
1348
1359
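For example, using the %-formatting described in the docstring (``repo`` as before; ``draft()`` is a stock revset predicate):

# %d escapes an integer revision; see revsetlang.formatspec for the rest.
for rev in repo.revs(b'ancestors(%d) and draft()', 42):
    repo.ui.write(b'%d\n' % rev)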
1349 def set(self, expr, *args):
1360 def set(self, expr, *args):
1350 '''Find revisions matching a revset and emit changectx instances.
1361 '''Find revisions matching a revset and emit changectx instances.
1351
1362
1352 This is a convenience wrapper around ``revs()`` that iterates the
1363 This is a convenience wrapper around ``revs()`` that iterates the
1353 result and is a generator of changectx instances.
1364 result and is a generator of changectx instances.
1354
1365
1355 Revset aliases from the configuration are not expanded. To expand
1366 Revset aliases from the configuration are not expanded. To expand
1356 user aliases, consider calling ``scmutil.revrange()``.
1367 user aliases, consider calling ``scmutil.revrange()``.
1357 '''
1368 '''
1358 for r in self.revs(expr, *args):
1369 for r in self.revs(expr, *args):
1359 yield self[r]
1370 yield self[r]
1360
1371
1361 def anyrevs(self, specs, user=False, localalias=None):
1372 def anyrevs(self, specs, user=False, localalias=None):
1362 '''Find revisions matching one of the given revsets.
1373 '''Find revisions matching one of the given revsets.
1363
1374
1364 Revset aliases from the configuration are not expanded by default. To
1375 Revset aliases from the configuration are not expanded by default. To
1365 expand user aliases, specify ``user=True``. To provide some local
1376 expand user aliases, specify ``user=True``. To provide some local
1366 definitions overriding user aliases, set ``localalias`` to
1377 definitions overriding user aliases, set ``localalias`` to
1367 ``{name: definitionstring}``.
1378 ``{name: definitionstring}``.
1368 '''
1379 '''
1369 if user:
1380 if user:
1370 m = revset.matchany(self.ui, specs,
1381 m = revset.matchany(self.ui, specs,
1371 lookup=revset.lookupfn(self),
1382 lookup=revset.lookupfn(self),
1372 localalias=localalias)
1383 localalias=localalias)
1373 else:
1384 else:
1374 m = revset.matchany(None, specs, localalias=localalias)
1385 m = revset.matchany(None, specs, localalias=localalias)
1375 return m(self)
1386 return m(self)
1376
1387
1377 def url(self):
1388 def url(self):
1378 return 'file:' + self.root
1389 return 'file:' + self.root
1379
1390
1380 def hook(self, name, throw=False, **args):
1391 def hook(self, name, throw=False, **args):
1381 """Call a hook, passing this repo instance.
1392 """Call a hook, passing this repo instance.
1382
1393
1383 This is a convenience method to aid invoking hooks. Extensions likely
1394 This is a convenience method to aid invoking hooks. Extensions likely
1384 won't call this unless they have registered a custom hook or are
1395 won't call this unless they have registered a custom hook or are
1385 replacing code that is expected to call a hook.
1396 replacing code that is expected to call a hook.
1386 """
1397 """
1387 return hook.hook(self.ui, self, name, throw, **args)
1398 return hook.hook(self.ui, self, name, throw, **args)
1388
1399
1389 @filteredpropertycache
1400 @filteredpropertycache
1390 def _tagscache(self):
1401 def _tagscache(self):
1391 '''Returns a tagscache object that contains various tags-related
1402 '''Returns a tagscache object that contains various tags-related
1392 caches.'''
1403 caches.'''
1393
1404
1394 # This simplifies its cache management by having one decorated
1405 # This simplifies its cache management by having one decorated
1395 # function (this one) and the rest simply fetch things from it.
1406 # function (this one) and the rest simply fetch things from it.
1396 class tagscache(object):
1407 class tagscache(object):
1397 def __init__(self):
1408 def __init__(self):
1398 # These two define the set of tags for this repository. tags
1409 # These two define the set of tags for this repository. tags
1399 # maps tag name to node; tagtypes maps tag name to 'global' or
1410 # maps tag name to node; tagtypes maps tag name to 'global' or
1400 # 'local'. (Global tags are defined by .hgtags across all
1411 # 'local'. (Global tags are defined by .hgtags across all
1401 # heads, and local tags are defined in .hg/localtags.)
1412 # heads, and local tags are defined in .hg/localtags.)
1402 # They constitute the in-memory cache of tags.
1413 # They constitute the in-memory cache of tags.
1403 self.tags = self.tagtypes = None
1414 self.tags = self.tagtypes = None
1404
1415
1405 self.nodetagscache = self.tagslist = None
1416 self.nodetagscache = self.tagslist = None
1406
1417
1407 cache = tagscache()
1418 cache = tagscache()
1408 cache.tags, cache.tagtypes = self._findtags()
1419 cache.tags, cache.tagtypes = self._findtags()
1409
1420
1410 return cache
1421 return cache
1411
1422
1412 def tags(self):
1423 def tags(self):
1413 '''return a mapping of tag to node'''
1424 '''return a mapping of tag to node'''
1414 t = {}
1425 t = {}
1415 if self.changelog.filteredrevs:
1426 if self.changelog.filteredrevs:
1416 tags, tt = self._findtags()
1427 tags, tt = self._findtags()
1417 else:
1428 else:
1418 tags = self._tagscache.tags
1429 tags = self._tagscache.tags
1419 rev = self.changelog.rev
1430 rev = self.changelog.rev
1420 for k, v in tags.iteritems():
1431 for k, v in tags.iteritems():
1421 try:
1432 try:
1422 # ignore tags to unknown nodes
1433 # ignore tags to unknown nodes
1423 rev(v)
1434 rev(v)
1424 t[k] = v
1435 t[k] = v
1425 except (error.LookupError, ValueError):
1436 except (error.LookupError, ValueError):
1426 pass
1437 pass
1427 return t
1438 return t
1428
1439
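Typical read-only use of the mapping returned by ``tags()`` (``repo`` as before; ``iteritems`` matches the Python 2 style of this codebase):

from mercurial.node import short

for name, node in sorted(repo.tags().iteritems()):
    repo.ui.write(b'%s %s\n' % (name, short(node)))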
1429 def _findtags(self):
1440 def _findtags(self):
1430 '''Do the hard work of finding tags. Return a pair of dicts
1441 '''Do the hard work of finding tags. Return a pair of dicts
1431 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1442 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1432 maps tag name to a string like \'global\' or \'local\'.
1443 maps tag name to a string like \'global\' or \'local\'.
1433 Subclasses or extensions are free to add their own tags, but
1444 Subclasses or extensions are free to add their own tags, but
1434 should be aware that the returned dicts will be retained for the
1445 should be aware that the returned dicts will be retained for the
1435 duration of the localrepo object.'''
1446 duration of the localrepo object.'''
1436
1447
1437 # XXX what tagtype should subclasses/extensions use? Currently
1448 # XXX what tagtype should subclasses/extensions use? Currently
1438 # mq and bookmarks add tags, but do not set the tagtype at all.
1449 # mq and bookmarks add tags, but do not set the tagtype at all.
1439 # Should each extension invent its own tag type? Should there
1450 # Should each extension invent its own tag type? Should there
1440 # be one tagtype for all such "virtual" tags? Or is the status
1451 # be one tagtype for all such "virtual" tags? Or is the status
1441 # quo fine?
1452 # quo fine?
1442
1453
1443
1454
1444 # map tag name to (node, hist)
1455 # map tag name to (node, hist)
1445 alltags = tagsmod.findglobaltags(self.ui, self)
1456 alltags = tagsmod.findglobaltags(self.ui, self)
1446 # map tag name to tag type
1457 # map tag name to tag type
1447 tagtypes = dict((tag, 'global') for tag in alltags)
1458 tagtypes = dict((tag, 'global') for tag in alltags)
1448
1459
1449 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1460 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1450
1461
1451 # Build the return dicts. Have to re-encode tag names because
1462 # Build the return dicts. Have to re-encode tag names because
1452 # the tags module always uses UTF-8 (in order not to lose info
1463 # the tags module always uses UTF-8 (in order not to lose info
1453 # writing to the cache), but the rest of Mercurial wants them in
1464 # writing to the cache), but the rest of Mercurial wants them in
1454 # local encoding.
1465 # local encoding.
1455 tags = {}
1466 tags = {}
1456 for (name, (node, hist)) in alltags.iteritems():
1467 for (name, (node, hist)) in alltags.iteritems():
1457 if node != nullid:
1468 if node != nullid:
1458 tags[encoding.tolocal(name)] = node
1469 tags[encoding.tolocal(name)] = node
1459 tags['tip'] = self.changelog.tip()
1470 tags['tip'] = self.changelog.tip()
1460 tagtypes = dict([(encoding.tolocal(name), value)
1471 tagtypes = dict([(encoding.tolocal(name), value)
1461 for (name, value) in tagtypes.iteritems()])
1472 for (name, value) in tagtypes.iteritems()])
1462 return (tags, tagtypes)
1473 return (tags, tagtypes)
1463
1474
1464 def tagtype(self, tagname):
1475 def tagtype(self, tagname):
1465 '''
1476 '''
1466 return the type of the given tag. result can be:
1477 return the type of the given tag. result can be:
1467
1478
1468 'local' : a local tag
1479 'local' : a local tag
1469 'global' : a global tag
1480 'global' : a global tag
1470 None : tag does not exist
1481 None : tag does not exist
1471 '''
1482 '''
1472
1483
1473 return self._tagscache.tagtypes.get(tagname)
1484 return self._tagscache.tagtypes.get(tagname)
1474
1485
1475 def tagslist(self):
1486 def tagslist(self):
1476 '''return a list of tags ordered by revision'''
1487 '''return a list of tags ordered by revision'''
1477 if not self._tagscache.tagslist:
1488 if not self._tagscache.tagslist:
1478 l = []
1489 l = []
1479 for t, n in self.tags().iteritems():
1490 for t, n in self.tags().iteritems():
1480 l.append((self.changelog.rev(n), t, n))
1491 l.append((self.changelog.rev(n), t, n))
1481 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1492 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1482
1493
1483 return self._tagscache.tagslist
1494 return self._tagscache.tagslist
1484
1495
1485 def nodetags(self, node):
1496 def nodetags(self, node):
1486 '''return the tags associated with a node'''
1497 '''return the tags associated with a node'''
1487 if not self._tagscache.nodetagscache:
1498 if not self._tagscache.nodetagscache:
1488 nodetagscache = {}
1499 nodetagscache = {}
1489 for t, n in self._tagscache.tags.iteritems():
1500 for t, n in self._tagscache.tags.iteritems():
1490 nodetagscache.setdefault(n, []).append(t)
1501 nodetagscache.setdefault(n, []).append(t)
1491 for tags in nodetagscache.itervalues():
1502 for tags in nodetagscache.itervalues():
1492 tags.sort()
1503 tags.sort()
1493 self._tagscache.nodetagscache = nodetagscache
1504 self._tagscache.nodetagscache = nodetagscache
1494 return self._tagscache.nodetagscache.get(node, [])
1505 return self._tagscache.nodetagscache.get(node, [])
1495
1506
1496 def nodebookmarks(self, node):
1507 def nodebookmarks(self, node):
1497 """return the list of bookmarks pointing to the specified node"""
1508 """return the list of bookmarks pointing to the specified node"""
1498 return self._bookmarks.names(node)
1509 return self._bookmarks.names(node)
1499
1510
1500 def branchmap(self):
1511 def branchmap(self):
1501 '''returns a dictionary {branch: [branchheads]} with branchheads
1512 '''returns a dictionary {branch: [branchheads]} with branchheads
1502 ordered by increasing revision number'''
1513 ordered by increasing revision number'''
1503 branchmap.updatecache(self)
1514 branchmap.updatecache(self)
1504 return self._branchcaches[self.filtername]
1515 return self._branchcaches[self.filtername]
1505
1516
1506 @unfilteredmethod
1517 @unfilteredmethod
1507 def revbranchcache(self):
1518 def revbranchcache(self):
1508 if not self._revbranchcache:
1519 if not self._revbranchcache:
1509 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1520 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1510 return self._revbranchcache
1521 return self._revbranchcache
1511
1522
1512 def branchtip(self, branch, ignoremissing=False):
1523 def branchtip(self, branch, ignoremissing=False):
1513 '''return the tip node for a given branch
1524 '''return the tip node for a given branch
1514
1525
1515 If ignoremissing is True, then this method will not raise an error.
1526 If ignoremissing is True, then this method will not raise an error.
1516 This is helpful for callers that only expect None for a missing branch
1527 This is helpful for callers that only expect None for a missing branch
1517 (e.g. namespace).
1528 (e.g. namespace).
1518
1529
1519 '''
1530 '''
1520 try:
1531 try:
1521 return self.branchmap().branchtip(branch)
1532 return self.branchmap().branchtip(branch)
1522 except KeyError:
1533 except KeyError:
1523 if not ignoremissing:
1534 if not ignoremissing:
1524 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1535 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1525 else:
1536 else:
1526 pass
1537 pass
1527
1538
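Putting ``branchmap()`` and ``branchtip()`` together (``repo`` as before):

for branch, heads in repo.branchmap().iteritems():
    repo.ui.write(b'%s: %d head(s)\n' % (branch, len(heads)))

tip = repo.branchtip(b'default', ignoremissing=True)  # None if missing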
1528 def lookup(self, key):
1539 def lookup(self, key):
1529 return scmutil.revsymbol(self, key).node()
1540 return scmutil.revsymbol(self, key).node()
1530
1541
1531 def lookupbranch(self, key):
1542 def lookupbranch(self, key):
1532 if key in self.branchmap():
1543 if key in self.branchmap():
1533 return key
1544 return key
1534
1545
1535 return scmutil.revsymbol(self, key).branch()
1546 return scmutil.revsymbol(self, key).branch()
1536
1547
1537 def known(self, nodes):
1548 def known(self, nodes):
1538 cl = self.changelog
1549 cl = self.changelog
1539 nm = cl.nodemap
1550 nm = cl.nodemap
1540 filtered = cl.filteredrevs
1551 filtered = cl.filteredrevs
1541 result = []
1552 result = []
1542 for n in nodes:
1553 for n in nodes:
1543 r = nm.get(n)
1554 r = nm.get(n)
1544 resp = not (r is None or r in filtered)
1555 resp = not (r is None or r in filtered)
1545 result.append(resp)
1556 result.append(resp)
1546 return result
1557 return result
1547
1558
1548 def local(self):
1559 def local(self):
1549 return self
1560 return self
1550
1561
1551 def publishing(self):
1562 def publishing(self):
1552 # it's safe (and desirable) to trust the publish flag unconditionally
1563 # it's safe (and desirable) to trust the publish flag unconditionally
1553 # so that we don't finalize changes shared between users via ssh or nfs
1564 # so that we don't finalize changes shared between users via ssh or nfs
1554 return self.ui.configbool('phases', 'publish', untrusted=True)
1565 return self.ui.configbool('phases', 'publish', untrusted=True)
1555
1566
1556 def cancopy(self):
1567 def cancopy(self):
1557 # so statichttprepo's override of local() works
1568 # so statichttprepo's override of local() works
1558 if not self.local():
1569 if not self.local():
1559 return False
1570 return False
1560 if not self.publishing():
1571 if not self.publishing():
1561 return True
1572 return True
1562 # if publishing we can't copy if there is filtered content
1573 # if publishing we can't copy if there is filtered content
1563 return not self.filtered('visible').changelog.filteredrevs
1574 return not self.filtered('visible').changelog.filteredrevs
1564
1575
1565 def shared(self):
1576 def shared(self):
1566 '''the type of shared repository (None if not shared)'''
1577 '''the type of shared repository (None if not shared)'''
1567 if self.sharedpath != self.path:
1578 if self.sharedpath != self.path:
1568 return 'store'
1579 return 'store'
1569 return None
1580 return None
1570
1581
1571 def wjoin(self, f, *insidef):
1582 def wjoin(self, f, *insidef):
1572 return self.vfs.reljoin(self.root, f, *insidef)
1583 return self.vfs.reljoin(self.root, f, *insidef)
1573
1584
1574 def setparents(self, p1, p2=nullid):
1585 def setparents(self, p1, p2=nullid):
1575 with self.dirstate.parentchange():
1586 with self.dirstate.parentchange():
1576 copies = self.dirstate.setparents(p1, p2)
1587 copies = self.dirstate.setparents(p1, p2)
1577 pctx = self[p1]
1588 pctx = self[p1]
1578 if copies:
1589 if copies:
1579 # Adjust copy records, the dirstate cannot do it, it
1590 # Adjust copy records, the dirstate cannot do it, it
1580 # requires access to parents manifests. Preserve them
1591 # requires access to parents manifests. Preserve them
1581 # only for entries added to first parent.
1592 # only for entries added to first parent.
1582 for f in copies:
1593 for f in copies:
1583 if f not in pctx and copies[f] in pctx:
1594 if f not in pctx and copies[f] in pctx:
1584 self.dirstate.copy(copies[f], f)
1595 self.dirstate.copy(copies[f], f)
1585 if p2 == nullid:
1596 if p2 == nullid:
1586 for f, s in sorted(self.dirstate.copies().items()):
1597 for f, s in sorted(self.dirstate.copies().items()):
1587 if f not in pctx and s not in pctx:
1598 if f not in pctx and s not in pctx:
1588 self.dirstate.copy(None, f)
1599 self.dirstate.copy(None, f)
1589
1600
1590 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1601 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1591 """changeid must be a changeset revision, if specified.
1602 """changeid must be a changeset revision, if specified.
1592 fileid can be a file revision or node."""
1603 fileid can be a file revision or node."""
1593 return context.filectx(self, path, changeid, fileid,
1604 return context.filectx(self, path, changeid, fileid,
1594 changectx=changectx)
1605 changectx=changectx)
1595
1606
1596 def getcwd(self):
1607 def getcwd(self):
1597 return self.dirstate.getcwd()
1608 return self.dirstate.getcwd()
1598
1609
1599 def pathto(self, f, cwd=None):
1610 def pathto(self, f, cwd=None):
1600 return self.dirstate.pathto(f, cwd)
1611 return self.dirstate.pathto(f, cwd)
1601
1612
1602 def _loadfilter(self, filter):
1613 def _loadfilter(self, filter):
1603 if filter not in self._filterpats:
1614 if filter not in self._filterpats:
1604 l = []
1615 l = []
1605 for pat, cmd in self.ui.configitems(filter):
1616 for pat, cmd in self.ui.configitems(filter):
1606 if cmd == '!':
1617 if cmd == '!':
1607 continue
1618 continue
1608 mf = matchmod.match(self.root, '', [pat])
1619 mf = matchmod.match(self.root, '', [pat])
1609 fn = None
1620 fn = None
1610 params = cmd
1621 params = cmd
1611 for name, filterfn in self._datafilters.iteritems():
1622 for name, filterfn in self._datafilters.iteritems():
1612 if cmd.startswith(name):
1623 if cmd.startswith(name):
1613 fn = filterfn
1624 fn = filterfn
1614 params = cmd[len(name):].lstrip()
1625 params = cmd[len(name):].lstrip()
1615 break
1626 break
1616 if not fn:
1627 if not fn:
1617 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1628 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1618 # Wrap old filters not supporting keyword arguments
1629 # Wrap old filters not supporting keyword arguments
1619 if not pycompat.getargspec(fn)[2]:
1630 if not pycompat.getargspec(fn)[2]:
1620 oldfn = fn
1631 oldfn = fn
1621 fn = lambda s, c, **kwargs: oldfn(s, c)
1632 fn = lambda s, c, **kwargs: oldfn(s, c)
1622 l.append((mf, fn, params))
1633 l.append((mf, fn, params))
1623 self._filterpats[filter] = l
1634 self._filterpats[filter] = l
1624 return self._filterpats[filter]
1635 return self._filterpats[filter]
1625
1636
1626 def _filter(self, filterpats, filename, data):
1637 def _filter(self, filterpats, filename, data):
1627 for mf, fn, cmd in filterpats:
1638 for mf, fn, cmd in filterpats:
1628 if mf(filename):
1639 if mf(filename):
1629 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1640 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1630 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1641 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1631 break
1642 break
1632
1643
1633 return data
1644 return data
1634
1645
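Filters normally come from the ``[encode]``/``[decode]`` configuration sections that ``_loadfilter`` reads; an extension can also register a named data filter via ``adddatafilter()``. A sketch with a hypothetical filter name:

# Register a filter, then reference it from configuration, e.g.:
#   [decode]
#   **.txt = upper:
def upperfilter(s, params, **kwargs):
    # s is the file data; params is the text after the filter name
    return s.upper()

repo.adddatafilter(b'upper:', upperfilter)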
1635 @unfilteredpropertycache
1646 @unfilteredpropertycache
1636 def _encodefilterpats(self):
1647 def _encodefilterpats(self):
1637 return self._loadfilter('encode')
1648 return self._loadfilter('encode')
1638
1649
1639 @unfilteredpropertycache
1650 @unfilteredpropertycache
1640 def _decodefilterpats(self):
1651 def _decodefilterpats(self):
1641 return self._loadfilter('decode')
1652 return self._loadfilter('decode')
1642
1653
1643 def adddatafilter(self, name, filter):
1654 def adddatafilter(self, name, filter):
1644 self._datafilters[name] = filter
1655 self._datafilters[name] = filter
1645
1656
1646 def wread(self, filename):
1657 def wread(self, filename):
1647 if self.wvfs.islink(filename):
1658 if self.wvfs.islink(filename):
1648 data = self.wvfs.readlink(filename)
1659 data = self.wvfs.readlink(filename)
1649 else:
1660 else:
1650 data = self.wvfs.read(filename)
1661 data = self.wvfs.read(filename)
1651 return self._filter(self._encodefilterpats, filename, data)
1662 return self._filter(self._encodefilterpats, filename, data)
1652
1663
1653 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1664 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1654 """write ``data`` into ``filename`` in the working directory
1665 """write ``data`` into ``filename`` in the working directory
1655
1666
1656 This returns the length of the written (maybe decoded) data.
1667 This returns the length of the written (maybe decoded) data.
1657 """
1668 """
1658 data = self._filter(self._decodefilterpats, filename, data)
1669 data = self._filter(self._decodefilterpats, filename, data)
1659 if 'l' in flags:
1670 if 'l' in flags:
1660 self.wvfs.symlink(data, filename)
1671 self.wvfs.symlink(data, filename)
1661 else:
1672 else:
1662 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1673 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1663 **kwargs)
1674 **kwargs)
1664 if 'x' in flags:
1675 if 'x' in flags:
1665 self.wvfs.setflags(filename, False, True)
1676 self.wvfs.setflags(filename, False, True)
1666 else:
1677 else:
1667 self.wvfs.setflags(filename, False, False)
1678 self.wvfs.setflags(filename, False, False)
1668 return len(data)
1679 return len(data)
1669
1680
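The ``flags`` argument of ``wwrite()`` is the usual flag string: ``''`` for a regular file, ``'x'`` for executable, ``'l'`` for a symlink. For example (normally done while holding the wlock):

repo.wwrite(b'plain.txt', b'content\n', b'')   # regular file
repo.wwrite(b'tool.sh', b'#!/bin/sh\n', b'x')  # executable bit set
repo.wwrite(b'alias', b'plain.txt', b'l')      # symlink pointing at plain.txt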
1670 def wwritedata(self, filename, data):
1681 def wwritedata(self, filename, data):
1671 return self._filter(self._decodefilterpats, filename, data)
1682 return self._filter(self._decodefilterpats, filename, data)
1672
1683
1673 def currenttransaction(self):
1684 def currenttransaction(self):
1674 """return the current transaction or None if non exists"""
1685 """return the current transaction or None if non exists"""
1675 if self._transref:
1686 if self._transref:
1676 tr = self._transref()
1687 tr = self._transref()
1677 else:
1688 else:
1678 tr = None
1689 tr = None
1679
1690
1680 if tr and tr.running():
1691 if tr and tr.running():
1681 return tr
1692 return tr
1682 return None
1693 return None
1683
1694
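As the check at the top of ``transaction()`` below enforces, a transaction must be opened while holding the store lock; the common pattern looks like this (the transaction description is a placeholder):

with repo.wlock(), repo.lock():
    with repo.transaction(b'my-operation') as tr:
        # mutate the store here; the journal written by _writejournal()
        # lets 'hg recover' roll back if this block raises
        pass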
    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with cases where a transaction
        # exposes new heads without a changegroup being involved (eg: phase
        # movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new, changed, or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #     <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
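        # Illustrative sketch (not part of the original code): a hook could
        # consume ``tags.changes`` in this format; the tag name and node
        # below are made up.
        #
        #   $ cat .hg/changes/tags.changes
        #   +A 1212121212121212121212121212121212121212 release-1.0
        #
        #   if os.environ.get('HG_TAG_MOVED'):
        #       for line in open('.hg/changes/tags.changes'):
        #           action, node, tag = line.split(' ', 2)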
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback(),
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
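    # Illustrative sketch (not part of the original code): the expected
    # calling pattern for transaction(); 'my-operation' is a made-up
    # transaction name.
    #
    #   with repo.wlock(), repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...  # mutate the store
    #           tr.close()
    #       finally:
    #           tr.release()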

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
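    # Illustrative sketch (not part of the original code): given the
    # "%d\n%s\n" format above, in a repository of 42 revisions a 'commit'
    # transaction would leave "journal.desc" containing:
    #
    #   42
    #   commit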

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback,
        # pass them to destroy(), which will prevent the branchhead cache
        # from being invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough
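    # Illustrative sketch (not part of the original code): the 'full' path
    # is what a manual cache warm-up would rely on, e.g.:
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)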

    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
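    # Illustrative sketch (not part of the original code): deferring work
    # until every lock is released; notify_build_server is a made-up
    # callback.
    #
    #   def notify_build_server():
    #       ...  # runs once the outermost lock is dropped
    #   repo._afterlock(notify_build_server)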

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)
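    # Illustrative sketch (not part of the original code): the lock-ordering
    # rule documented above, wlock before lock.
    #
    #   with repo.wlock():       # working-copy lock first...
    #       with repo.lock():    # ...then the store lock
    #           ...              # safe to modify working copy and store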

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to
            # find where the file copy came from if the source of a copy was
            # not in the parent directory. However, this doesn't actually
            # make sense to do (what does a copy from something not in your
            # working copy even mean?) and it causes bugs (eg, issue4476).
            # Instead, we will warn the user that copy information was
            # dropped, so if they didn't expect this outcome it can be fixed,
            # but this is the correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
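    # Illustrative sketch (not part of the original code): for a file
    # committed after ``hg copy foo bar``, the filelog metadata recorded by
    # _filecommit would take this shape (node value made up):
    #
    #   meta = {"copy": "foo",
    #           "copyrev": "1212121212121212121212121212121212121212"}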

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

2364 @unfilteredmethod
2375 @unfilteredmethod
2365 def commit(self, text="", user=None, date=None, match=None, force=False,
2376 def commit(self, text="", user=None, date=None, match=None, force=False,
2366 editor=False, extra=None):
2377 editor=False, extra=None):
2367 """Add a new revision to current repository.
2378 """Add a new revision to current repository.
2368
2379
2369 Revision information is gathered from the working directory,
2380 Revision information is gathered from the working directory,
2370 match can be used to filter the committed files. If editor is
2381 match can be used to filter the committed files. If editor is
2371 supplied, it is called to get a commit message.
2382 supplied, it is called to get a commit message.
2372 """
2383 """
2373 if extra is None:
2384 if extra is None:
2374 extra = {}
2385 extra = {}
2375
2386
2376 def fail(f, msg):
2387 def fail(f, msg):
2377 raise error.Abort('%s: %s' % (f, msg))
2388 raise error.Abort('%s: %s' % (f, msg))
2378
2389
2379 if not match:
2390 if not match:
2380 match = matchmod.always(self.root, '')
2391 match = matchmod.always(self.root, '')
2381
2392
2382 if not force:
2393 if not force:
2383 vdirs = []
2394 vdirs = []
2384 match.explicitdir = vdirs.append
2395 match.explicitdir = vdirs.append
2385 match.bad = fail
2396 match.bad = fail
2386
2397
2387 wlock = lock = tr = None
2398 wlock = lock = tr = None
2388 try:
2399 try:
2389 wlock = self.wlock()
2400 wlock = self.wlock()
2390 lock = self.lock() # for recent changelog (see issue4368)
2401 lock = self.lock() # for recent changelog (see issue4368)
2391
2402
2392 wctx = self[None]
2403 wctx = self[None]
2393 merge = len(wctx.parents()) > 1
2404 merge = len(wctx.parents()) > 1
2394
2405
2395 if not force and merge and not match.always():
2406 if not force and merge and not match.always():
2396 raise error.Abort(_('cannot partially commit a merge '
2407 raise error.Abort(_('cannot partially commit a merge '
2397 '(do not specify files or patterns)'))
2408 '(do not specify files or patterns)'))
2398
2409
2399 status = self.status(match=match, clean=force)
2410 status = self.status(match=match, clean=force)
2400 if force:
2411 if force:
2401 status.modified.extend(status.clean) # mq may commit clean files
2412 status.modified.extend(status.clean) # mq may commit clean files
2402
2413
2403 # check subrepos
2414 # check subrepos
2404 subs, commitsubs, newstate = subrepoutil.precommit(
2415 subs, commitsubs, newstate = subrepoutil.precommit(
2405 self.ui, wctx, status, match, force=force)
2416 self.ui, wctx, status, match, force=force)
2406
2417
2407 # make sure all explicit patterns are matched
2418 # make sure all explicit patterns are matched
2408 if not force:
2419 if not force:
2409 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2420 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2410
2421
2411 cctx = context.workingcommitctx(self, status,
2422 cctx = context.workingcommitctx(self, status,
2412 text, user, date, extra)
2423 text, user, date, extra)
2413
2424
2414 # internal config: ui.allowemptycommit
2425 # internal config: ui.allowemptycommit
2415 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2426 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2416 or extra.get('close') or merge or cctx.files()
2427 or extra.get('close') or merge or cctx.files()
2417 or self.ui.configbool('ui', 'allowemptycommit'))
2428 or self.ui.configbool('ui', 'allowemptycommit'))
2418 if not allowemptycommit:
2429 if not allowemptycommit:
2419 return None
2430 return None
2420
2431
2421 if merge and cctx.deleted():
2432 if merge and cctx.deleted():
2422 raise error.Abort(_("cannot commit merge with missing files"))
2433 raise error.Abort(_("cannot commit merge with missing files"))
2423
2434
2424 ms = mergemod.mergestate.read(self)
2435 ms = mergemod.mergestate.read(self)
2425 mergeutil.checkunresolved(ms)
2436 mergeutil.checkunresolved(ms)
2426
2437
2427 if editor:
2438 if editor:
2428 cctx._text = editor(self, cctx, subs)
2439 cctx._text = editor(self, cctx, subs)
2429 edited = (text != cctx._text)
2440 edited = (text != cctx._text)
2430
2441
2431 # Save commit message in case this transaction gets rolled back
2442 # Save commit message in case this transaction gets rolled back
2432 # (e.g. by a pretxncommit hook). Leave the content alone on
2443 # (e.g. by a pretxncommit hook). Leave the content alone on
2433 # the assumption that the user will use the same editor again.
2444 # the assumption that the user will use the same editor again.
2434 msgfn = self.savecommitmessage(cctx._text)
2445 msgfn = self.savecommitmessage(cctx._text)
2435
2446
2436 # commit subs and write new state
2447 # commit subs and write new state
2437 if subs:
2448 if subs:
2438 for s in sorted(commitsubs):
2449 for s in sorted(commitsubs):
2439 sub = wctx.sub(s)
2450 sub = wctx.sub(s)
2440 self.ui.status(_('committing subrepository %s\n') %
2451 self.ui.status(_('committing subrepository %s\n') %
2441 subrepoutil.subrelpath(sub))
2452 subrepoutil.subrelpath(sub))
2442 sr = sub.commit(cctx._text, user, date)
2453 sr = sub.commit(cctx._text, user, date)
2443 newstate[s] = (newstate[s][0], sr)
2454 newstate[s] = (newstate[s][0], sr)
2444 subrepoutil.writestate(self, newstate)
2455 subrepoutil.writestate(self, newstate)
2445
2456
2446 p1, p2 = self.dirstate.parents()
2457 p1, p2 = self.dirstate.parents()
2447 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2458 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2448 try:
2459 try:
2449 self.hook("precommit", throw=True, parent1=hookp1,
2460 self.hook("precommit", throw=True, parent1=hookp1,
2450 parent2=hookp2)
2461 parent2=hookp2)
2451 tr = self.transaction('commit')
2462 tr = self.transaction('commit')
2452 ret = self.commitctx(cctx, True)
2463 ret = self.commitctx(cctx, True)
2453 except: # re-raises
2464 except: # re-raises
2454 if edited:
2465 if edited:
2455 self.ui.write(
2466 self.ui.write(
2456 _('note: commit message saved in %s\n') % msgfn)
2467 _('note: commit message saved in %s\n') % msgfn)
2457 raise
2468 raise
2458 # update bookmarks, dirstate and mergestate
2469 # update bookmarks, dirstate and mergestate
2459 bookmarks.update(self, [p1, p2], ret)
2470 bookmarks.update(self, [p1, p2], ret)
2460 cctx.markcommitted(ret)
2471 cctx.markcommitted(ret)
2461 ms.reset()
2472 ms.reset()
2462 tr.close()
2473 tr.close()
2463
2474
2464 finally:
2475 finally:
2465 lockmod.release(tr, lock, wlock)
2476 lockmod.release(tr, lock, wlock)
2466
2477
2467 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2478 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2468 # hack for commands that use a temporary commit (eg: histedit):
2479 # hack for commands that use a temporary commit (eg: histedit):
2469 # the temporary commit may have been stripped before the hook runs
2480 # the temporary commit may have been stripped before the hook runs
2470 if self.changelog.hasnode(ret):
2481 if self.changelog.hasnode(ret):
2471 self.hook("commit", node=node, parent1=parent1,
2482 self.hook("commit", node=node, parent1=parent1,
2472 parent2=parent2)
2483 parent2=parent2)
2473 self._afterlock(commithook)
2484 self._afterlock(commithook)
2474 return ret
2485 return ret
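The commit flow above (precommit hook, transaction, commitctx, deferred commit hook) is normally driven through repo.commit(). A hedged sketch of calling it from a hypothetical extension command; the command name and message are illustrative, and the keyword arguments are assumed to match the commit() signature of this era:

from mercurial import registrar

cmdtable = {}
command = registrar.command(cmdtable)

@command(b'autocommit', [], b'hg autocommit')
def autocommit(ui, repo):
    # commit() returns the new node, or None for an empty commit when
    # ui.allowemptycommit is not set (see the check above)
    node = repo.commit(text=b'automated commit',
                       user=b'bot <bot@example.com>')
    if node is None:
        ui.status(b'nothing changed\n')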
2475
2486
2476 @unfilteredmethod
2487 @unfilteredmethod
2477 def commitctx(self, ctx, error=False):
2488 def commitctx(self, ctx, error=False):
2478 """Add a new revision to current repository.
2489 """Add a new revision to current repository.
2479 Revision information is passed via the context argument.
2490 Revision information is passed via the context argument.
2480
2491
2481 ctx.files() should list all files involved in this commit, i.e.
2492 ctx.files() should list all files involved in this commit, i.e.
2482 modified/added/removed files. On merge, it may be wider than the
2493 modified/added/removed files. On merge, it may be wider than the
2483 ctx.files() to be committed, since any file nodes derived directly
2494 ctx.files() to be committed, since any file nodes derived directly
2484 from p1 or p2 are excluded from the committed ctx.files().
2495 from p1 or p2 are excluded from the committed ctx.files().
2485 """
2496 """
2486
2497
2487 tr = None
2498 tr = None
2488 p1, p2 = ctx.p1(), ctx.p2()
2499 p1, p2 = ctx.p1(), ctx.p2()
2489 user = ctx.user()
2500 user = ctx.user()
2490
2501
2491 lock = self.lock()
2502 lock = self.lock()
2492 try:
2503 try:
2493 tr = self.transaction("commit")
2504 tr = self.transaction("commit")
2494 trp = weakref.proxy(tr)
2505 trp = weakref.proxy(tr)
2495
2506
2496 if ctx.manifestnode():
2507 if ctx.manifestnode():
2497 # reuse an existing manifest revision
2508 # reuse an existing manifest revision
2498 self.ui.debug('reusing known manifest\n')
2509 self.ui.debug('reusing known manifest\n')
2499 mn = ctx.manifestnode()
2510 mn = ctx.manifestnode()
2500 files = ctx.files()
2511 files = ctx.files()
2501 elif ctx.files():
2512 elif ctx.files():
2502 m1ctx = p1.manifestctx()
2513 m1ctx = p1.manifestctx()
2503 m2ctx = p2.manifestctx()
2514 m2ctx = p2.manifestctx()
2504 mctx = m1ctx.copy()
2515 mctx = m1ctx.copy()
2505
2516
2506 m = mctx.read()
2517 m = mctx.read()
2507 m1 = m1ctx.read()
2518 m1 = m1ctx.read()
2508 m2 = m2ctx.read()
2519 m2 = m2ctx.read()
2509
2520
2510 # check in files
2521 # check in files
2511 added = []
2522 added = []
2512 changed = []
2523 changed = []
2513 removed = list(ctx.removed())
2524 removed = list(ctx.removed())
2514 linkrev = len(self)
2525 linkrev = len(self)
2515 self.ui.note(_("committing files:\n"))
2526 self.ui.note(_("committing files:\n"))
2516 for f in sorted(ctx.modified() + ctx.added()):
2527 for f in sorted(ctx.modified() + ctx.added()):
2517 self.ui.note(f + "\n")
2528 self.ui.note(f + "\n")
2518 try:
2529 try:
2519 fctx = ctx[f]
2530 fctx = ctx[f]
2520 if fctx is None:
2531 if fctx is None:
2521 removed.append(f)
2532 removed.append(f)
2522 else:
2533 else:
2523 added.append(f)
2534 added.append(f)
2524 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2535 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2525 trp, changed)
2536 trp, changed)
2526 m.setflag(f, fctx.flags())
2537 m.setflag(f, fctx.flags())
2527 except OSError as inst:
2538 except OSError as inst:
2528 self.ui.warn(_("trouble committing %s!\n") % f)
2539 self.ui.warn(_("trouble committing %s!\n") % f)
2529 raise
2540 raise
2530 except IOError as inst:
2541 except IOError as inst:
2531 errcode = getattr(inst, 'errno', errno.ENOENT)
2542 errcode = getattr(inst, 'errno', errno.ENOENT)
2532 if error or errcode and errcode != errno.ENOENT:
2543 if error or errcode and errcode != errno.ENOENT:
2533 self.ui.warn(_("trouble committing %s!\n") % f)
2544 self.ui.warn(_("trouble committing %s!\n") % f)
2534 raise
2545 raise
2535
2546
2536 # update manifest
2547 # update manifest
2537 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2548 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2538 drop = [f for f in removed if f in m]
2549 drop = [f for f in removed if f in m]
2539 for f in drop:
2550 for f in drop:
2540 del m[f]
2551 del m[f]
2541 files = changed + removed
2552 files = changed + removed
2542 md = None
2553 md = None
2543 if not files:
2554 if not files:
2544 # if no "files" actually changed in terms of the changelog,
2555 # if no "files" actually changed in terms of the changelog,
2545 # try hard to detect unmodified manifest entry so that the
2556 # try hard to detect unmodified manifest entry so that the
2546 # exact same commit can be reproduced later on convert.
2557 # exact same commit can be reproduced later on convert.
2547 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2558 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2548 if not files and md:
2559 if not files and md:
2549 self.ui.debug('not reusing manifest (no file change in '
2560 self.ui.debug('not reusing manifest (no file change in '
2550 'changelog, but manifest differs)\n')
2561 'changelog, but manifest differs)\n')
2551 if files or md:
2562 if files or md:
2552 self.ui.note(_("committing manifest\n"))
2563 self.ui.note(_("committing manifest\n"))
2553 # we're using narrowmatch here since it's already applied at
2564 # we're using narrowmatch here since it's already applied at
2554 # other stages (such as dirstate.walk), so we're already
2565 # other stages (such as dirstate.walk), so we're already
2555 # ignoring things outside of narrowspec in most cases. The
2566 # ignoring things outside of narrowspec in most cases. The
2556 # one case where we might have files outside the narrowspec
2567 # one case where we might have files outside the narrowspec
2557 # at this point is merges, and we already error out in the
2568 # at this point is merges, and we already error out in the
2558 # case where the merge has files outside of the narrowspec,
2569 # case where the merge has files outside of the narrowspec,
2559 # so this is safe.
2570 # so this is safe.
2560 mn = mctx.write(trp, linkrev,
2571 mn = mctx.write(trp, linkrev,
2561 p1.manifestnode(), p2.manifestnode(),
2572 p1.manifestnode(), p2.manifestnode(),
2562 added, drop, match=self.narrowmatch())
2573 added, drop, match=self.narrowmatch())
2563 else:
2574 else:
2564 self.ui.debug('reusing manifest from p1 (listed files '
2575 self.ui.debug('reusing manifest from p1 (listed files '
2565 'actually unchanged)\n')
2576 'actually unchanged)\n')
2566 mn = p1.manifestnode()
2577 mn = p1.manifestnode()
2567 else:
2578 else:
2568 self.ui.debug('reusing manifest from p1 (no file change)\n')
2579 self.ui.debug('reusing manifest from p1 (no file change)\n')
2569 mn = p1.manifestnode()
2580 mn = p1.manifestnode()
2570 files = []
2581 files = []
2571
2582
2572 # update changelog
2583 # update changelog
2573 self.ui.note(_("committing changelog\n"))
2584 self.ui.note(_("committing changelog\n"))
2574 self.changelog.delayupdate(tr)
2585 self.changelog.delayupdate(tr)
2575 n = self.changelog.add(mn, files, ctx.description(),
2586 n = self.changelog.add(mn, files, ctx.description(),
2576 trp, p1.node(), p2.node(),
2587 trp, p1.node(), p2.node(),
2577 user, ctx.date(), ctx.extra().copy())
2588 user, ctx.date(), ctx.extra().copy())
2578 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2589 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2579 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2590 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2580 parent2=xp2)
2591 parent2=xp2)
2581 # set the new commit in its proper phase
2592 # set the new commit in its proper phase
2582 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2593 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2583 if targetphase:
2594 if targetphase:
2584 # retracting the boundary does not alter the parent changeset.
2595 # retracting the boundary does not alter the parent changeset.
2585 # if a parent has a higher phase, the resulting phase will
2596 # if a parent has a higher phase, the resulting phase will
2586 # be compliant anyway
2597 # be compliant anyway
2587 #
2598 #
2588 # if minimal phase was 0 we don't need to retract anything
2599 # if minimal phase was 0 we don't need to retract anything
2589 phases.registernew(self, tr, targetphase, [n])
2600 phases.registernew(self, tr, targetphase, [n])
2590 tr.close()
2601 tr.close()
2591 return n
2602 return n
2592 finally:
2603 finally:
2593 if tr:
2604 if tr:
2594 tr.release()
2605 tr.release()
2595 lock.release()
2606 lock.release()
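Beyond the working-directory path taken by commit(), commitctx() is also commonly fed an in-memory context. A hedged sketch using context.memctx; the file name, contents, and the memctx/memfilectx signatures are assumptions based on this era of the API:

from mercurial import context

def filectxfn(repo, memctx, path):
    # returning None here would mark the file as removed
    return context.memfilectx(repo, memctx, path, b'new contents\n')

def commitinmemory(repo):
    p1 = repo[b'.']
    ctx = context.memctx(repo, (p1.node(), None), b'synthetic commit',
                         [b'a.txt'], filectxfn, user=b'bot')
    return repo.commitctx(ctx)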
2596
2607
2597 @unfilteredmethod
2608 @unfilteredmethod
2598 def destroying(self):
2609 def destroying(self):
2599 '''Inform the repository that nodes are about to be destroyed.
2610 '''Inform the repository that nodes are about to be destroyed.
2600 Intended for use by strip and rollback, so there's a common
2611 Intended for use by strip and rollback, so there's a common
2601 place for anything that has to be done before destroying history.
2612 place for anything that has to be done before destroying history.
2602
2613
2603 This is mostly useful for saving state that is in memory and waiting
2614 This is mostly useful for saving state that is in memory and waiting
2604 to be flushed when the current lock is released. Because a call to
2615 to be flushed when the current lock is released. Because a call to
2605 destroyed is imminent, the repo will be invalidated, causing those
2616 destroyed is imminent, the repo will be invalidated, causing those
2606 changes to stay in memory (waiting for the next unlock), or vanish
2617 changes to stay in memory (waiting for the next unlock), or vanish
2607 completely.
2618 completely.
2608 '''
2619 '''
2609 # When using the same lock to commit and strip, the phasecache is left
2620 # When using the same lock to commit and strip, the phasecache is left
2610 # dirty after committing. Then when we strip, the repo is invalidated,
2621 # dirty after committing. Then when we strip, the repo is invalidated,
2611 # causing those changes to disappear.
2622 # causing those changes to disappear.
2612 if '_phasecache' in vars(self):
2623 if '_phasecache' in vars(self):
2613 self._phasecache.write()
2624 self._phasecache.write()
2614
2625
2615 @unfilteredmethod
2626 @unfilteredmethod
2616 def destroyed(self):
2627 def destroyed(self):
2617 '''Inform the repository that nodes have been destroyed.
2628 '''Inform the repository that nodes have been destroyed.
2618 Intended for use by strip and rollback, so there's a common
2629 Intended for use by strip and rollback, so there's a common
2619 place for anything that has to be done after destroying history.
2630 place for anything that has to be done after destroying history.
2620 '''
2631 '''
2621 # When one tries to:
2632 # When one tries to:
2622 # 1) destroy nodes thus calling this method (e.g. strip)
2633 # 1) destroy nodes thus calling this method (e.g. strip)
2623 # 2) use phasecache somewhere (e.g. commit)
2634 # 2) use phasecache somewhere (e.g. commit)
2624 #
2635 #
2625 # then 2) will fail because the phasecache contains nodes that were
2636 # then 2) will fail because the phasecache contains nodes that were
2626 # removed. We can either remove phasecache from the filecache,
2637 # removed. We can either remove phasecache from the filecache,
2627 # causing it to reload next time it is accessed, or simply filter
2638 # causing it to reload next time it is accessed, or simply filter
2628 # the removed nodes now and write the updated cache.
2639 # the removed nodes now and write the updated cache.
2629 self._phasecache.filterunknown(self)
2640 self._phasecache.filterunknown(self)
2630 self._phasecache.write()
2641 self._phasecache.write()
2631
2642
2632 # refresh all repository caches
2643 # refresh all repository caches
2633 self.updatecaches()
2644 self.updatecaches()
2634
2645
2635 # Ensure the persistent tag cache is updated. Doing it now
2646 # Ensure the persistent tag cache is updated. Doing it now
2636 # means that the tag cache only has to worry about destroyed
2647 # means that the tag cache only has to worry about destroyed
2637 # heads immediately after a strip/rollback. That in turn
2648 # heads immediately after a strip/rollback. That in turn
2638 # guarantees that "cachetip == currenttip" (comparing both rev
2649 # guarantees that "cachetip == currenttip" (comparing both rev
2639 # and node) always means no nodes have been added or destroyed.
2650 # and node) always means no nodes have been added or destroyed.
2640
2651
2641 # XXX this is suboptimal when qrefresh'ing: we strip the current
2652 # XXX this is suboptimal when qrefresh'ing: we strip the current
2642 # head, refresh the tag cache, then immediately add a new head.
2653 # head, refresh the tag cache, then immediately add a new head.
2643 # But I think doing it this way is necessary for the "instant
2654 # But I think doing it this way is necessary for the "instant
2644 # tag cache retrieval" case to work.
2655 # tag cache retrieval" case to work.
2645 self.invalidate()
2656 self.invalidate()
2646
2657
2647 def status(self, node1='.', node2=None, match=None,
2658 def status(self, node1='.', node2=None, match=None,
2648 ignored=False, clean=False, unknown=False,
2659 ignored=False, clean=False, unknown=False,
2649 listsubrepos=False):
2660 listsubrepos=False):
2650 '''a convenience method that calls node1.status(node2)'''
2661 '''a convenience method that calls node1.status(node2)'''
2651 return self[node1].status(node2, match, ignored, clean, unknown,
2662 return self[node1].status(node2, match, ignored, clean, unknown,
2652 listsubrepos)
2663 listsubrepos)
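A usage sketch: the value returned by status() is a status tuple with named fields (modified, added, removed, deleted, unknown, ignored, clean); the repo object is assumed to come from the usual entry points:

st = repo.status()  # working directory against '.' by default
repo.ui.write(b'%d modified, %d added, %d removed\n'
              % (len(st.modified), len(st.added), len(st.removed)))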
2653
2664
2654 def addpostdsstatus(self, ps):
2665 def addpostdsstatus(self, ps):
2655 """Add a callback to run within the wlock, at the point at which status
2666 """Add a callback to run within the wlock, at the point at which status
2656 fixups happen.
2667 fixups happen.
2657
2668
2658 On status completion, callback(wctx, status) will be called with the
2669 On status completion, callback(wctx, status) will be called with the
2659 wlock held, unless the dirstate has changed from underneath or the wlock
2670 wlock held, unless the dirstate has changed from underneath or the wlock
2660 couldn't be grabbed.
2671 couldn't be grabbed.
2661
2672
2662 Callbacks should not capture and use a cached copy of the dirstate --
2673 Callbacks should not capture and use a cached copy of the dirstate --
2663 it might change in the meanwhile. Instead, they should access the
2674 it might change in the meanwhile. Instead, they should access the
2664 dirstate via wctx.repo().dirstate.
2675 dirstate via wctx.repo().dirstate.
2665
2676
2666 This list is emptied out after each status run -- extensions should
2677 This list is emptied out after each status run -- extensions should
2667 make sure it adds to this list each time dirstate.status is called.
2678 make sure it adds to this list each time dirstate.status is called.
2668 Extensions should also make sure they don't call this for statuses
2679 Extensions should also make sure they don't call this for statuses
2669 that don't involve the dirstate.
2680 that don't involve the dirstate.
2670 """
2681 """
2671
2682
2672 # The list is located here for uniqueness reasons -- it is actually
2683 # The list is located here for uniqueness reasons -- it is actually
2673 # managed by the workingctx, but that isn't unique per-repo.
2684 # managed by the workingctx, but that isn't unique per-repo.
2674 self._postdsstatus.append(ps)
2685 self._postdsstatus.append(ps)
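A hedged sketch of registering such a callback, the pattern used by extensions like fsmonitor; the callback body is purely illustrative:

def poststatus(wctx, status):
    # runs under wlock after status fixups; re-read state via wctx.repo()
    wctx.repo().ui.debug(b'%d files modified\n' % len(status.modified))

repo.addpostdsstatus(poststatus)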
2675
2686
2676 def postdsstatus(self):
2687 def postdsstatus(self):
2677 """Used by workingctx to get the list of post-dirstate-status hooks."""
2688 """Used by workingctx to get the list of post-dirstate-status hooks."""
2678 return self._postdsstatus
2689 return self._postdsstatus
2679
2690
2680 def clearpostdsstatus(self):
2691 def clearpostdsstatus(self):
2681 """Used by workingctx to clear post-dirstate-status hooks."""
2692 """Used by workingctx to clear post-dirstate-status hooks."""
2682 del self._postdsstatus[:]
2693 del self._postdsstatus[:]
2683
2694
2684 def heads(self, start=None):
2695 def heads(self, start=None):
2685 if start is None:
2696 if start is None:
2686 cl = self.changelog
2697 cl = self.changelog
2687 headrevs = reversed(cl.headrevs())
2698 headrevs = reversed(cl.headrevs())
2688 return [cl.node(rev) for rev in headrevs]
2699 return [cl.node(rev) for rev in headrevs]
2689
2700
2690 heads = self.changelog.heads(start)
2701 heads = self.changelog.heads(start)
2691 # sort the output in rev descending order
2702 # sort the output in rev descending order
2692 return sorted(heads, key=self.changelog.rev, reverse=True)
2703 return sorted(heads, key=self.changelog.rev, reverse=True)
2693
2704
2694 def branchheads(self, branch=None, start=None, closed=False):
2705 def branchheads(self, branch=None, start=None, closed=False):
2695 '''return a (possibly filtered) list of heads for the given branch
2706 '''return a (possibly filtered) list of heads for the given branch
2696
2707
2697 Heads are returned in topological order, from newest to oldest.
2708 Heads are returned in topological order, from newest to oldest.
2698 If branch is None, use the dirstate branch.
2709 If branch is None, use the dirstate branch.
2699 If start is not None, return only heads reachable from start.
2710 If start is not None, return only heads reachable from start.
2700 If closed is True, return heads that are marked as closed as well.
2711 If closed is True, return heads that are marked as closed as well.
2701 '''
2712 '''
2702 if branch is None:
2713 if branch is None:
2703 branch = self[None].branch()
2714 branch = self[None].branch()
2704 branches = self.branchmap()
2715 branches = self.branchmap()
2705 if branch not in branches:
2716 if branch not in branches:
2706 return []
2717 return []
2707 # the cache returns heads ordered lowest to highest
2718 # the cache returns heads ordered lowest to highest
2708 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2719 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2709 if start is not None:
2720 if start is not None:
2710 # filter out the heads that cannot be reached from startrev
2721 # filter out the heads that cannot be reached from startrev
2711 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2722 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2712 bheads = [h for h in bheads if h in fbheads]
2723 bheads = [h for h in bheads if h in fbheads]
2713 return bheads
2724 return bheads
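An illustrative call, printing the heads of a hypothetical 'default' branch newest first, closed heads included:

from mercurial.node import hex

for node in repo.branchheads(b'default', closed=True):
    repo.ui.write(b'%s\n' % hex(node))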
2714
2725
2715 def branches(self, nodes):
2726 def branches(self, nodes):
2716 if not nodes:
2727 if not nodes:
2717 nodes = [self.changelog.tip()]
2728 nodes = [self.changelog.tip()]
2718 b = []
2729 b = []
2719 for n in nodes:
2730 for n in nodes:
2720 t = n
2731 t = n
2721 while True:
2732 while True:
2722 p = self.changelog.parents(n)
2733 p = self.changelog.parents(n)
2723 if p[1] != nullid or p[0] == nullid:
2734 if p[1] != nullid or p[0] == nullid:
2724 b.append((t, n, p[0], p[1]))
2735 b.append((t, n, p[0], p[1]))
2725 break
2736 break
2726 n = p[0]
2737 n = p[0]
2727 return b
2738 return b
2728
2739
2729 def between(self, pairs):
2740 def between(self, pairs):
2730 r = []
2741 r = []
2731
2742
2732 for top, bottom in pairs:
2743 for top, bottom in pairs:
2733 n, l, i = top, [], 0
2744 n, l, i = top, [], 0
2734 f = 1
2745 f = 1
2735
2746
2736 while n != bottom and n != nullid:
2747 while n != bottom and n != nullid:
2737 p = self.changelog.parents(n)[0]
2748 p = self.changelog.parents(n)[0]
2738 if i == f:
2749 if i == f:
2739 l.append(n)
2750 l.append(n)
2740 f = f * 2
2751 f = f * 2
2741 n = p
2752 n = p
2742 i += 1
2753 i += 1
2743
2754
2744 r.append(l)
2755 r.append(l)
2745
2756
2746 return r
2757 return r
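between() samples first-parent ancestors at exponentially growing distances (1, 2, 4, 8, ...) from top toward bottom, which keeps the legacy discovery protocol's answers small. A self-contained model of the same sampling over a linear chain of numbered revisions (the dict-based chain is purely illustrative):

def sample(parent, top, bottom):
    l, i, f, n = [], 0, 1, top
    while n != bottom:
        if i == f:            # distances 1, 2, 4, 8, ... from top
            l.append(n)
            f *= 2
        n = parent[n]         # follow the first parent
        i += 1
    return l

parent = {k: k - 1 for k in range(1, 11)}    # 10 -> 9 -> ... -> 0
assert sample(parent, 10, 0) == [9, 8, 6, 2]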
2747
2758
2748 def checkpush(self, pushop):
2759 def checkpush(self, pushop):
2749 """Extensions can override this function if additional checks have
2760 """Extensions can override this function if additional checks have
2750 to be performed before pushing, or call it if they override push
2761 to be performed before pushing, or call it if they override push
2751 command.
2762 command.
2752 """
2763 """
2753
2764
2754 @unfilteredpropertycache
2765 @unfilteredpropertycache
2755 def prepushoutgoinghooks(self):
2766 def prepushoutgoinghooks(self):
2756 """Return util.hooks consists of a pushop with repo, remote, outgoing
2767 """Return util.hooks consists of a pushop with repo, remote, outgoing
2757 methods, which are called before pushing changesets.
2768 methods, which are called before pushing changesets.
2758 """
2769 """
2759 return util.hooks()
2770 return util.hooks()
2760
2771
2761 def pushkey(self, namespace, key, old, new):
2772 def pushkey(self, namespace, key, old, new):
2762 try:
2773 try:
2763 tr = self.currenttransaction()
2774 tr = self.currenttransaction()
2764 hookargs = {}
2775 hookargs = {}
2765 if tr is not None:
2776 if tr is not None:
2766 hookargs.update(tr.hookargs)
2777 hookargs.update(tr.hookargs)
2767 hookargs = pycompat.strkwargs(hookargs)
2778 hookargs = pycompat.strkwargs(hookargs)
2768 hookargs[r'namespace'] = namespace
2779 hookargs[r'namespace'] = namespace
2769 hookargs[r'key'] = key
2780 hookargs[r'key'] = key
2770 hookargs[r'old'] = old
2781 hookargs[r'old'] = old
2771 hookargs[r'new'] = new
2782 hookargs[r'new'] = new
2772 self.hook('prepushkey', throw=True, **hookargs)
2783 self.hook('prepushkey', throw=True, **hookargs)
2773 except error.HookAbort as exc:
2784 except error.HookAbort as exc:
2774 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2785 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2775 if exc.hint:
2786 if exc.hint:
2776 self.ui.write_err(_("(%s)\n") % exc.hint)
2787 self.ui.write_err(_("(%s)\n") % exc.hint)
2777 return False
2788 return False
2778 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2789 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2779 ret = pushkey.push(self, namespace, key, old, new)
2790 ret = pushkey.push(self, namespace, key, old, new)
2780 def runhook():
2791 def runhook():
2781 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2792 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2782 ret=ret)
2793 ret=ret)
2783 self._afterlock(runhook)
2794 self._afterlock(runhook)
2784 return ret
2795 return ret
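A hedged usage sketch against the built-in 'bookmarks' pushkey namespace; the bookmark name is hypothetical, values travel as hex strings, and an empty old value is used when the key is being created:

from mercurial.node import hex

def movebookmark(repo, name, node):
    # returns a truthy value when the prepushkey hook and the namespace
    # both accept the update
    return repo.pushkey(b'bookmarks', name, b'', hex(node))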
2785
2796
2786 def listkeys(self, namespace):
2797 def listkeys(self, namespace):
2787 self.hook('prelistkeys', throw=True, namespace=namespace)
2798 self.hook('prelistkeys', throw=True, namespace=namespace)
2788 self.ui.debug('listing keys for "%s"\n' % namespace)
2799 self.ui.debug('listing keys for "%s"\n' % namespace)
2789 values = pushkey.list(self, namespace)
2800 values = pushkey.list(self, namespace)
2790 self.hook('listkeys', namespace=namespace, values=values)
2801 self.hook('listkeys', namespace=namespace, values=values)
2791 return values
2802 return values
2792
2803
2793 def debugwireargs(self, one, two, three=None, four=None, five=None):
2804 def debugwireargs(self, one, two, three=None, four=None, five=None):
2794 '''used to test argument passing over the wire'''
2805 '''used to test argument passing over the wire'''
2795 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2806 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2796 pycompat.bytestr(four),
2807 pycompat.bytestr(four),
2797 pycompat.bytestr(five))
2808 pycompat.bytestr(five))
2798
2809
2799 def savecommitmessage(self, text):
2810 def savecommitmessage(self, text):
2800 fp = self.vfs('last-message.txt', 'wb')
2811 fp = self.vfs('last-message.txt', 'wb')
2801 try:
2812 try:
2802 fp.write(text)
2813 fp.write(text)
2803 finally:
2814 finally:
2804 fp.close()
2815 fp.close()
2805 return self.pathto(fp.name[len(self.root) + 1:])
2816 return self.pathto(fp.name[len(self.root) + 1:])
2806
2817
2807 # used to avoid circular references so destructors work
2818 # used to avoid circular references so destructors work
2808 def aftertrans(files):
2819 def aftertrans(files):
2809 renamefiles = [tuple(t) for t in files]
2820 renamefiles = [tuple(t) for t in files]
2810 def a():
2821 def a():
2811 for vfs, src, dest in renamefiles:
2822 for vfs, src, dest in renamefiles:
2812 # if src and dest refer to the same file, vfs.rename is a no-op,
2823 # if src and dest refer to the same file, vfs.rename is a no-op,
2813 # leaving both src and dest on disk. delete dest first to make
2824 # leaving both src and dest on disk. delete dest first to make
2814 # sure the rename cannot be such a no-op.
2825 # sure the rename cannot be such a no-op.
2815 vfs.tryunlink(dest)
2826 vfs.tryunlink(dest)
2816 try:
2827 try:
2817 vfs.rename(src, dest)
2828 vfs.rename(src, dest)
2818 except OSError: # journal file does not yet exist
2829 except OSError: # journal file does not yet exist
2819 pass
2830 pass
2820 return a
2831 return a
2821
2832
2822 def undoname(fn):
2833 def undoname(fn):
2823 base, name = os.path.split(fn)
2834 base, name = os.path.split(fn)
2824 assert name.startswith('journal')
2835 assert name.startswith('journal')
2825 return os.path.join(base, name.replace('journal', 'undo', 1))
2836 return os.path.join(base, name.replace('journal', 'undo', 1))
2826
2837
2827 def instance(ui, path, create, intents=None, createopts=None):
2838 def instance(ui, path, create, intents=None, createopts=None):
2828 localpath = util.urllocalpath(path)
2839 localpath = util.urllocalpath(path)
2829 if create:
2840 if create:
2830 createrepository(ui, localpath, createopts=createopts)
2841 createrepository(ui, localpath, createopts=createopts)
2831
2842
2832 return makelocalrepository(ui, localpath, intents=intents)
2843 return makelocalrepository(ui, localpath, intents=intents)
2833
2844
2834 def islocal(path):
2845 def islocal(path):
2835 return True
2846 return True
2836
2847
2837 def defaultcreateopts(ui, createopts=None):
2848 def defaultcreateopts(ui, createopts=None):
2838 """Populate the default creation options for a repository.
2849 """Populate the default creation options for a repository.
2839
2850
2840 A dictionary of explicitly requested creation options can be passed
2851 A dictionary of explicitly requested creation options can be passed
2841 in. Missing keys will be populated.
2852 in. Missing keys will be populated.
2842 """
2853 """
2843 createopts = dict(createopts or {})
2854 createopts = dict(createopts or {})
2844
2855
2845 if 'backend' not in createopts:
2856 if 'backend' not in createopts:
2846 # experimental config: storage.new-repo-backend
2857 # experimental config: storage.new-repo-backend
2847 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2858 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2848
2859
2849 return createopts
2860 return createopts
2850
2861
2851 def newreporequirements(ui, createopts):
2862 def newreporequirements(ui, createopts):
2852 """Determine the set of requirements for a new local repository.
2863 """Determine the set of requirements for a new local repository.
2853
2864
2854 Extensions can wrap this function to specify custom requirements for
2865 Extensions can wrap this function to specify custom requirements for
2855 new repositories.
2866 new repositories.
2856 """
2867 """
2857 # If the repo is being created from a shared repository, we copy
2868 # If the repo is being created from a shared repository, we copy
2858 # its requirements.
2869 # its requirements.
2859 if 'sharedrepo' in createopts:
2870 if 'sharedrepo' in createopts:
2860 requirements = set(createopts['sharedrepo'].requirements)
2871 requirements = set(createopts['sharedrepo'].requirements)
2861 if createopts.get('sharedrelative'):
2872 if createopts.get('sharedrelative'):
2862 requirements.add('relshared')
2873 requirements.add('relshared')
2863 else:
2874 else:
2864 requirements.add('shared')
2875 requirements.add('shared')
2865
2876
2866 return requirements
2877 return requirements
2867
2878
2868 if 'backend' not in createopts:
2879 if 'backend' not in createopts:
2869 raise error.ProgrammingError('backend key not present in createopts; '
2880 raise error.ProgrammingError('backend key not present in createopts; '
2870 'was defaultcreateopts() called?')
2881 'was defaultcreateopts() called?')
2871
2882
2872 if createopts['backend'] != 'revlogv1':
2883 if createopts['backend'] != 'revlogv1':
2873 raise error.Abort(_('unable to determine repository requirements for '
2884 raise error.Abort(_('unable to determine repository requirements for '
2874 'storage backend: %s') % createopts['backend'])
2885 'storage backend: %s') % createopts['backend'])
2875
2886
2876 requirements = {'revlogv1'}
2887 requirements = {'revlogv1'}
2877 if ui.configbool('format', 'usestore'):
2888 if ui.configbool('format', 'usestore'):
2878 requirements.add('store')
2889 requirements.add('store')
2879 if ui.configbool('format', 'usefncache'):
2890 if ui.configbool('format', 'usefncache'):
2880 requirements.add('fncache')
2891 requirements.add('fncache')
2881 if ui.configbool('format', 'dotencode'):
2892 if ui.configbool('format', 'dotencode'):
2882 requirements.add('dotencode')
2893 requirements.add('dotencode')
2883
2894
2884 compengine = ui.config('experimental', 'format.compression')
2895 compengine = ui.config('experimental', 'format.compression')
2885 if compengine not in util.compengines:
2896 if compengine not in util.compengines:
2886 raise error.Abort(_('compression engine %s defined by '
2897 raise error.Abort(_('compression engine %s defined by '
2887 'experimental.format.compression not available') %
2898 'experimental.format.compression not available') %
2888 compengine,
2899 compengine,
2889 hint=_('run "hg debuginstall" to list available '
2900 hint=_('run "hg debuginstall" to list available '
2890 'compression engines'))
2901 'compression engines'))
2891
2902
2892 # zlib is the historical default and doesn't need an explicit requirement.
2903 # zlib is the historical default and doesn't need an explicit requirement.
2893 if compengine != 'zlib':
2904 if compengine != 'zlib':
2894 requirements.add('exp-compression-%s' % compengine)
2905 requirements.add('exp-compression-%s' % compengine)
2895
2906
2896 if scmutil.gdinitconfig(ui):
2907 if scmutil.gdinitconfig(ui):
2897 requirements.add('generaldelta')
2908 requirements.add('generaldelta')
2898 if ui.configbool('experimental', 'treemanifest'):
2909 if ui.configbool('experimental', 'treemanifest'):
2899 requirements.add('treemanifest')
2910 requirements.add('treemanifest')
2900 # experimental config: format.sparse-revlog
2911 # experimental config: format.sparse-revlog
2901 if ui.configbool('format', 'sparse-revlog'):
2912 if ui.configbool('format', 'sparse-revlog'):
2902 requirements.add(SPARSEREVLOG_REQUIREMENT)
2913 requirements.add(SPARSEREVLOG_REQUIREMENT)
2903
2914
2904 revlogv2 = ui.config('experimental', 'revlogv2')
2915 revlogv2 = ui.config('experimental', 'revlogv2')
2905 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2916 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2906 requirements.remove('revlogv1')
2917 requirements.remove('revlogv1')
2907 # generaldelta is implied by revlogv2.
2918 # generaldelta is implied by revlogv2.
2908 requirements.discard('generaldelta')
2919 requirements.discard('generaldelta')
2909 requirements.add(REVLOGV2_REQUIREMENT)
2920 requirements.add(REVLOGV2_REQUIREMENT)
2910 # experimental config: format.internal-phase
2921 # experimental config: format.internal-phase
2911 if ui.configbool('format', 'internal-phase'):
2922 if ui.configbool('format', 'internal-phase'):
2912 requirements.add('internal-phase')
2923 requirements.add('internal-phase')
2913
2924
2914 if createopts.get('narrowfiles'):
2925 if createopts.get('narrowfiles'):
2915 requirements.add(repository.NARROW_REQUIREMENT)
2926 requirements.add(repository.NARROW_REQUIREMENT)
2916
2927
2917 if createopts.get('lfs'):
2928 if createopts.get('lfs'):
2918 requirements.add('lfs')
2929 requirements.add('lfs')
2919
2930
2920 return requirements
2931 return requirements
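A hedged sketch of the extension hook point named in the docstring: wrapping newreporequirements() to add a custom requirement to newly created repositories. The requirement string is hypothetical:

from mercurial import extensions, localrepo

def _newreporequirements(orig, ui, createopts):
    requirements = orig(ui, createopts)
    requirements.add('exp-myextension-v1')  # hypothetical requirement
    return requirements

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)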
2921
2932
2922 def filterknowncreateopts(ui, createopts):
2933 def filterknowncreateopts(ui, createopts):
2923 """Filters a dict of repo creation options against options that are known.
2934 """Filters a dict of repo creation options against options that are known.
2924
2935
2925 Receives a dict of repo creation options and returns a dict of those
2936 Receives a dict of repo creation options and returns a dict of those
2926 options that we don't know how to handle.
2937 options that we don't know how to handle.
2927
2938
2928 This function is called as part of repository creation. If the
2939 This function is called as part of repository creation. If the
2929 returned dict contains any items, repository creation will not
2940 returned dict contains any items, repository creation will not
2930 be allowed, as it means there was a request to create a repository
2941 be allowed, as it means there was a request to create a repository
2931 with options not recognized by loaded code.
2942 with options not recognized by loaded code.
2932
2943
2933 Extensions can wrap this function to filter out creation options
2944 Extensions can wrap this function to filter out creation options
2934 they know how to handle.
2945 they know how to handle.
2935 """
2946 """
2936 known = {
2947 known = {
2937 'backend',
2948 'backend',
2938 'lfs',
2949 'lfs',
2939 'narrowfiles',
2950 'narrowfiles',
2940 'sharedrepo',
2951 'sharedrepo',
2941 'sharedrelative',
2952 'sharedrelative',
2942 'shareditems',
2953 'shareditems',
2943 'shallowfilestore',
2954 'shallowfilestore',
2944 }
2955 }
2945
2956
2946 return {k: v for k, v in createopts.items() if k not in known}
2957 return {k: v for k, v in createopts.items() if k not in known}
2947
2958
2948 def createrepository(ui, path, createopts=None):
2959 def createrepository(ui, path, createopts=None):
2949 """Create a new repository in a vfs.
2960 """Create a new repository in a vfs.
2950
2961
2951 ``path`` path to the new repo's working directory.
2962 ``path`` path to the new repo's working directory.
2952 ``createopts`` options for the new repository.
2963 ``createopts`` options for the new repository.
2953
2964
2954 The following keys for ``createopts`` are recognized:
2965 The following keys for ``createopts`` are recognized:
2955
2966
2956 backend
2967 backend
2957 The storage backend to use.
2968 The storage backend to use.
2958 lfs
2969 lfs
2959 Repository will be created with ``lfs`` requirement. The lfs extension
2970 Repository will be created with ``lfs`` requirement. The lfs extension
2960 will automatically be loaded when the repository is accessed.
2971 will automatically be loaded when the repository is accessed.
2961 narrowfiles
2972 narrowfiles
2962 Set up repository to support narrow file storage.
2973 Set up repository to support narrow file storage.
2963 sharedrepo
2974 sharedrepo
2964 Repository object from which storage should be shared.
2975 Repository object from which storage should be shared.
2965 sharedrelative
2976 sharedrelative
2966 Boolean indicating if the path to the shared repo should be
2977 Boolean indicating if the path to the shared repo should be
2967 stored as relative. By default, the pointer to the "parent" repo
2978 stored as relative. By default, the pointer to the "parent" repo
2968 is stored as an absolute path.
2979 is stored as an absolute path.
2969 shareditems
2980 shareditems
2970 Set of items to share to the new repository (in addition to storage).
2981 Set of items to share to the new repository (in addition to storage).
2971 shallowfilestore
2982 shallowfilestore
2972 Indicates that storage for files should be shallow (not all ancestor
2983 Indicates that storage for files should be shallow (not all ancestor
2973 revisions are known).
2984 revisions are known).
2974 """
2985 """
2975 createopts = defaultcreateopts(ui, createopts=createopts)
2986 createopts = defaultcreateopts(ui, createopts=createopts)
2976
2987
2977 unknownopts = filterknowncreateopts(ui, createopts)
2988 unknownopts = filterknowncreateopts(ui, createopts)
2978
2989
2979 if not isinstance(unknownopts, dict):
2990 if not isinstance(unknownopts, dict):
2980 raise error.ProgrammingError('filterknowncreateopts() did not return '
2991 raise error.ProgrammingError('filterknowncreateopts() did not return '
2981 'a dict')
2992 'a dict')
2982
2993
2983 if unknownopts:
2994 if unknownopts:
2984 raise error.Abort(_('unable to create repository because of unknown '
2995 raise error.Abort(_('unable to create repository because of unknown '
2985 'creation option: %s') %
2996 'creation option: %s') %
2986 ', '.join(sorted(unknownopts)),
2997 ', '.join(sorted(unknownopts)),
2987 hint=_('is a required extension not loaded?'))
2998 hint=_('is a required extension not loaded?'))
2988
2999
2989 requirements = newreporequirements(ui, createopts=createopts)
3000 requirements = newreporequirements(ui, createopts=createopts)
2990
3001
2991 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3002 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2992
3003
2993 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3004 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2994 if hgvfs.exists():
3005 if hgvfs.exists():
2995 raise error.RepoError(_('repository %s already exists') % path)
3006 raise error.RepoError(_('repository %s already exists') % path)
2996
3007
2997 if 'sharedrepo' in createopts:
3008 if 'sharedrepo' in createopts:
2998 sharedpath = createopts['sharedrepo'].sharedpath
3009 sharedpath = createopts['sharedrepo'].sharedpath
2999
3010
3000 if createopts.get('sharedrelative'):
3011 if createopts.get('sharedrelative'):
3001 try:
3012 try:
3002 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3013 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3003 except (IOError, ValueError) as e:
3014 except (IOError, ValueError) as e:
3004 # ValueError is raised on Windows if the drive letters differ
3015 # ValueError is raised on Windows if the drive letters differ
3005 # on each path.
3016 # on each path.
3006 raise error.Abort(_('cannot calculate relative path'),
3017 raise error.Abort(_('cannot calculate relative path'),
3007 hint=stringutil.forcebytestr(e))
3018 hint=stringutil.forcebytestr(e))
3008
3019
3009 if not wdirvfs.exists():
3020 if not wdirvfs.exists():
3010 wdirvfs.makedirs()
3021 wdirvfs.makedirs()
3011
3022
3012 hgvfs.makedir(notindexed=True)
3023 hgvfs.makedir(notindexed=True)
3013 if 'sharedrepo' not in createopts:
3024 if 'sharedrepo' not in createopts:
3014 hgvfs.mkdir(b'cache')
3025 hgvfs.mkdir(b'cache')
3015 hgvfs.mkdir(b'wcache')
3026 hgvfs.mkdir(b'wcache')
3016
3027
3017 if b'store' in requirements and 'sharedrepo' not in createopts:
3028 if b'store' in requirements and 'sharedrepo' not in createopts:
3018 hgvfs.mkdir(b'store')
3029 hgvfs.mkdir(b'store')
3019
3030
3020 # We create an invalid changelog outside the store so very old
3031 # We create an invalid changelog outside the store so very old
3021 # Mercurial versions (which didn't know about the requirements
3032 # Mercurial versions (which didn't know about the requirements
3022 # file) encounter an error on reading the changelog. This
3033 # file) encounter an error on reading the changelog. This
3023 # effectively locks out old clients and prevents them from
3034 # effectively locks out old clients and prevents them from
3024 # mucking with a repo in an unknown format.
3035 # mucking with a repo in an unknown format.
3025 #
3036 #
3026 # The revlog header has version 2, which won't be recognized by
3037 # The revlog header has version 2, which won't be recognized by
3027 # such old clients.
3038 # such old clients.
3028 hgvfs.append(b'00changelog.i',
3039 hgvfs.append(b'00changelog.i',
3029 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3040 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3030 b'layout')
3041 b'layout')
3031
3042
3032 scmutil.writerequires(hgvfs, requirements)
3043 scmutil.writerequires(hgvfs, requirements)
3033
3044
3034 # Write out file telling readers where to find the shared store.
3045 # Write out file telling readers where to find the shared store.
3035 if 'sharedrepo' in createopts:
3046 if 'sharedrepo' in createopts:
3036 hgvfs.write(b'sharedpath', sharedpath)
3047 hgvfs.write(b'sharedpath', sharedpath)
3037
3048
3038 if createopts.get('shareditems'):
3049 if createopts.get('shareditems'):
3039 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3050 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3040 hgvfs.write(b'shared', shared)
3051 hgvfs.write(b'shared', shared)
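An illustrative end-to-end use, mirroring what instance() does for the create=True case; the path is hypothetical and ui.load() is assumed as the usual way to obtain a ui object outside of dispatch:

from mercurial import localrepo, ui as uimod

ui = uimod.ui.load()
localrepo.createrepository(ui, b'/tmp/newrepo', createopts={'lfs': True})
repo = localrepo.makelocalrepository(ui, b'/tmp/newrepo')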
3041
3052
3042 def poisonrepository(repo):
3053 def poisonrepository(repo):
3043 """Poison a repository instance so it can no longer be used."""
3054 """Poison a repository instance so it can no longer be used."""
3044 # Perform any cleanup on the instance.
3055 # Perform any cleanup on the instance.
3045 repo.close()
3056 repo.close()
3046
3057
3047 # Our strategy is to replace the type of the object with one that
3058 # Our strategy is to replace the type of the object with one that
3048 # has all attribute lookups result in error.
3059 # has all attribute lookups result in error.
3049 #
3060 #
3050 # But we have to allow the close() method because some constructors
3061 # But we have to allow the close() method because some constructors
3051 # of repos call close() on repo references.
3062 # of repos call close() on repo references.
3052 class poisonedrepository(object):
3063 class poisonedrepository(object):
3053 def __getattribute__(self, item):
3064 def __getattribute__(self, item):
3054 if item == r'close':
3065 if item == r'close':
3055 return object.__getattribute__(self, item)
3066 return object.__getattribute__(self, item)
3056
3067
3057 raise error.ProgrammingError('repo instances should not be used '
3068 raise error.ProgrammingError('repo instances should not be used '
3058 'after unshare')
3069 'after unshare')
3059
3070
3060 def close(self):
3071 def close(self):
3061 pass
3072 pass
3062
3073
3063 # We may have a repoview, which intercepts __setattr__. So be sure
3074 # We may have a repoview, which intercepts __setattr__. So be sure
3064 # we operate at the lowest level possible.
3075 # we operate at the lowest level possible.
3065 object.__setattr__(repo, r'__class__', poisonedrepository)
3076 object.__setattr__(repo, r'__class__', poisonedrepository)
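A small sketch of the resulting behaviour; error is the module imported at the top of this file:

poisonrepository(repo)
repo.close()            # still permitted: close() is special-cased above
try:
    repo.changelog      # any other attribute access now raises
except error.ProgrammingError:
    pass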
@@ -1,1858 +1,1864 b''
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 )
13 )
14 from .utils import (
14 from .utils import (
15 interfaceutil,
15 interfaceutil,
16 )
16 )
17
17
18 # When narrowing is finalized and no longer subject to format changes,
18 # When narrowing is finalized and no longer subject to format changes,
19 # we should move this to just "narrow" or similar.
19 # we should move this to just "narrow" or similar.
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21
21
22 # Local repository feature string.
22 # Local repository feature string.
23
23
24 # Revlogs are being used for file storage.
24 # Revlogs are being used for file storage.
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
26 # The storage part of the repository is shared from an external source.
26 # The storage part of the repository is shared from an external source.
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
28 # LFS supported for backing file storage.
28 # LFS supported for backing file storage.
29 REPO_FEATURE_LFS = b'lfs'
29 REPO_FEATURE_LFS = b'lfs'
30 # Repository supports being stream cloned.
30 # Repository supports being stream cloned.
31 REPO_FEATURE_STREAM_CLONE = b'streamclone'
31 REPO_FEATURE_STREAM_CLONE = b'streamclone'
32 # Files storage may lack data for all ancestors.
32 # Files storage may lack data for all ancestors.
33 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
33 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
34
34
35 REVISION_FLAG_CENSORED = 1 << 15
35 REVISION_FLAG_CENSORED = 1 << 15
36 REVISION_FLAG_ELLIPSIS = 1 << 14
36 REVISION_FLAG_ELLIPSIS = 1 << 14
37 REVISION_FLAG_EXTSTORED = 1 << 13
37 REVISION_FLAG_EXTSTORED = 1 << 13
38
38
39 REVISION_FLAGS_KNOWN = (
39 REVISION_FLAGS_KNOWN = (
40 REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
40 REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
41
41
42 CG_DELTAMODE_STD = b'default'
42 CG_DELTAMODE_STD = b'default'
43 CG_DELTAMODE_PREV = b'previous'
43 CG_DELTAMODE_PREV = b'previous'
44 CG_DELTAMODE_FULL = b'fulltext'
44 CG_DELTAMODE_FULL = b'fulltext'
45 CG_DELTAMODE_P1 = b'p1'
45 CG_DELTAMODE_P1 = b'p1'
46
46
47 class ipeerconnection(interfaceutil.Interface):
47 class ipeerconnection(interfaceutil.Interface):
48 """Represents a "connection" to a repository.
48 """Represents a "connection" to a repository.
49
49
50 This is the base interface for representing a connection to a repository.
50 This is the base interface for representing a connection to a repository.
51 It holds basic properties and methods applicable to all peer types.
51 It holds basic properties and methods applicable to all peer types.
52
52
53 This is not a complete interface definition and should not be used
53 This is not a complete interface definition and should not be used
54 outside of this module.
54 outside of this module.
55 """
55 """
56 ui = interfaceutil.Attribute("""ui.ui instance""")
56 ui = interfaceutil.Attribute("""ui.ui instance""")
57
57
58 def url():
58 def url():
59 """Returns a URL string representing this peer.
59 """Returns a URL string representing this peer.
60
60
61 Currently, implementations expose the raw URL used to construct the
61 Currently, implementations expose the raw URL used to construct the
62 instance. It may contain credentials as part of the URL. The
62 instance. It may contain credentials as part of the URL. The
63 expectations of the value aren't well-defined and this could lead to
63 expectations of the value aren't well-defined and this could lead to
64 data leakage.
64 data leakage.
65
65
66 TODO audit/clean consumers and more clearly define the contents of this
66 TODO audit/clean consumers and more clearly define the contents of this
67 value.
67 value.
68 """
68 """
69
69
70 def local():
70 def local():
71 """Returns a local repository instance.
71 """Returns a local repository instance.
72
72
73 If the peer represents a local repository, returns an object that
73 If the peer represents a local repository, returns an object that
74 can be used to interface with it. Otherwise returns ``None``.
74 can be used to interface with it. Otherwise returns ``None``.
75 """
75 """
76
76
77 def peer():
77 def peer():
78 """Returns an object conforming to this interface.
78 """Returns an object conforming to this interface.
79
79
80 Most implementations will ``return self``.
80 Most implementations will ``return self``.
81 """
81 """
82
82
83 def canpush():
83 def canpush():
84 """Returns a boolean indicating if this peer can be pushed to."""
84 """Returns a boolean indicating if this peer can be pushed to."""
85
85
86 def close():
86 def close():
87 """Close the connection to this peer.
87 """Close the connection to this peer.
88
88
89 This is called when the peer will no longer be used. Resources
89 This is called when the peer will no longer be used. Resources
90 associated with the peer should be cleaned up.
90 associated with the peer should be cleaned up.
91 """
91 """
92
92
93 class ipeercapabilities(interfaceutil.Interface):
93 class ipeercapabilities(interfaceutil.Interface):
94 """Peer sub-interface related to capabilities."""
94 """Peer sub-interface related to capabilities."""
95
95
96 def capable(name):
96 def capable(name):
97 """Determine support for a named capability.
97 """Determine support for a named capability.
98
98
99 Returns ``False`` if capability not supported.
99 Returns ``False`` if capability not supported.
100
100
101 Returns ``True`` if boolean capability is supported. Returns a string
101 Returns ``True`` if boolean capability is supported. Returns a string
102 if capability support is non-boolean.
102 if capability support is non-boolean.
103
103
104 Capability strings may or may not map to wire protocol capabilities.
104 Capability strings may or may not map to wire protocol capabilities.
105 """
105 """
106
106
107 def requirecap(name, purpose):
107 def requirecap(name, purpose):
108 """Require a capability to be present.
108 """Require a capability to be present.
109
109
110 Raises a ``CapabilityError`` if the capability isn't present.
110 Raises a ``CapabilityError`` if the capability isn't present.
111 """
111 """
112
112
113 class ipeercommands(interfaceutil.Interface):
113 class ipeercommands(interfaceutil.Interface):
114 """Client-side interface for communicating over the wire protocol.
114 """Client-side interface for communicating over the wire protocol.
115
115
116 This interface is used as a gateway to the Mercurial wire protocol.
116 This interface is used as a gateway to the Mercurial wire protocol.
117 Its methods commonly call wire protocol commands of the same name.
117 Its methods commonly call wire protocol commands of the same name.
118 """
118 """
119
119
120 def branchmap():
120 def branchmap():
121 """Obtain heads in named branches.
121 """Obtain heads in named branches.
122
122
123 Returns a dict mapping branch name to an iterable of nodes that are
123 Returns a dict mapping branch name to an iterable of nodes that are
124 heads on that branch.
124 heads on that branch.
125 """
125 """
126
126
127 def capabilities():
127 def capabilities():
128 """Obtain capabilities of the peer.
128 """Obtain capabilities of the peer.
129
129
130 Returns a set of string capabilities.
130 Returns a set of string capabilities.
131 """
131 """
132
132
133 def clonebundles():
133 def clonebundles():
134 """Obtains the clone bundles manifest for the repo.
134 """Obtains the clone bundles manifest for the repo.
135
135
136 Returns the manifest as unparsed bytes.
136 Returns the manifest as unparsed bytes.
137 """
137 """
138
138
139 def debugwireargs(one, two, three=None, four=None, five=None):
139 def debugwireargs(one, two, three=None, four=None, five=None):
140 """Used to facilitate debugging of arguments passed over the wire."""
140 """Used to facilitate debugging of arguments passed over the wire."""
141
141
142 def getbundle(source, **kwargs):
142 def getbundle(source, **kwargs):
143 """Obtain remote repository data as a bundle.
143 """Obtain remote repository data as a bundle.
144
144
145 This command is how the bulk of repository data is transferred from
145 This command is how the bulk of repository data is transferred from
146 the peer to the local repository.
146 the peer to the local repository.
147
147
148 Returns a generator of bundle data.
148 Returns a generator of bundle data.
149 """
149 """
150
150
151 def heads():
151 def heads():
152 """Determine all known head revisions in the peer.
152 """Determine all known head revisions in the peer.
153
153
154 Returns an iterable of binary nodes.
154 Returns an iterable of binary nodes.
155 """
155 """
156
156
157 def known(nodes):
157 def known(nodes):
158 """Determine whether multiple nodes are known.
158 """Determine whether multiple nodes are known.
159
159
160 Accepts an iterable of nodes whose presence to check for.
160 Accepts an iterable of nodes whose presence to check for.
161
161
162 Returns an iterable of booleans indicating whether the corresponding node
162 Returns an iterable of booleans indicating whether the corresponding node
163 at that index is known to the peer.
163 at that index is known to the peer.
164 """
164 """
165
165
166 def listkeys(namespace):
166 def listkeys(namespace):
167 """Obtain all keys in a pushkey namespace.
167 """Obtain all keys in a pushkey namespace.
168
168
169 Returns an iterable of key names.
169 Returns an iterable of key names.
170 """
170 """
171
171
172 def lookup(key):
172 def lookup(key):
173 """Resolve a value to a known revision.
173 """Resolve a value to a known revision.
174
174
175 Returns a binary node of the resolved revision on success.
175 Returns a binary node of the resolved revision on success.
176 """
176 """
177
177
178 def pushkey(namespace, key, old, new):
178 def pushkey(namespace, key, old, new):
179 """Set a value using the ``pushkey`` protocol.
179 """Set a value using the ``pushkey`` protocol.
180
180
181 Arguments correspond to the pushkey namespace and key to operate on and
181 Arguments correspond to the pushkey namespace and key to operate on and
182 the old and new values for that key.
182 the old and new values for that key.
183
183
184 Returns a string with the peer result. The value inside varies by the
184 Returns a string with the peer result. The value inside varies by the
185 namespace.
185 namespace.
186 """
186 """
187
187
188 def stream_out():
188 def stream_out():
189 """Obtain streaming clone data.
189 """Obtain streaming clone data.
190
190
191 Successful result should be a generator of data chunks.
191 Successful result should be a generator of data chunks.
192 """
192 """
193
193
194 def unbundle(bundle, heads, url):
194 def unbundle(bundle, heads, url):
195 """Transfer repository data to the peer.
195 """Transfer repository data to the peer.
196
196
197 This is how the bulk of data during a push is transferred.
197 This is how the bulk of data during a push is transferred.
198
198
199 Returns the integer number of heads added to the peer.
199 Returns the integer number of heads added to the peer.
200 """
200 """
201
201
202 class ipeerlegacycommands(interfaceutil.Interface):
202 class ipeerlegacycommands(interfaceutil.Interface):
203 """Interface for implementing support for legacy wire protocol commands.
203 """Interface for implementing support for legacy wire protocol commands.
204
204
205 Wire protocol commands transition to legacy status when they are no longer
205 Wire protocol commands transition to legacy status when they are no longer
206 used by modern clients. To facilitate identifying which commands are
206 used by modern clients. To facilitate identifying which commands are
207 legacy, the interfaces are split.
207 legacy, the interfaces are split.
208 """
208 """
209
209
210 def between(pairs):
210 def between(pairs):
211 """Obtain nodes between pairs of nodes.
211 """Obtain nodes between pairs of nodes.
212
212
213 ``pairs`` is an iterable of node pairs.
213 ``pairs`` is an iterable of node pairs.
214
214
215 Returns an iterable of iterables of nodes corresponding to each
215 Returns an iterable of iterables of nodes corresponding to each
216 requested pair.
216 requested pair.
217 """
217 """
218
218
219 def branches(nodes):
219 def branches(nodes):
220 """Obtain ancestor changesets of specific nodes back to a branch point.
220 """Obtain ancestor changesets of specific nodes back to a branch point.
221
221
222 For each requested node, the peer finds the first ancestor node that is
222 For each requested node, the peer finds the first ancestor node that is
223 a DAG root or is a merge.
223 a DAG root or is a merge.
224
224
225 Returns an iterable of iterables with the resolved values for each node.
225 Returns an iterable of iterables with the resolved values for each node.
226 """
226 """
227
227
228 def changegroup(nodes, source):
228 def changegroup(nodes, source):
229 """Obtain a changegroup with data for descendants of specified nodes."""
229 """Obtain a changegroup with data for descendants of specified nodes."""
230
230
231 def changegroupsubset(bases, heads, source):
231 def changegroupsubset(bases, heads, source):
232 """Obtain a changegroup with data for revisions between ``bases`` and ``heads``."""
232 """Obtain a changegroup with data for revisions between ``bases`` and ``heads``."""
233
233
234 class ipeercommandexecutor(interfaceutil.Interface):
234 class ipeercommandexecutor(interfaceutil.Interface):
235 """Represents a mechanism to execute remote commands.
235 """Represents a mechanism to execute remote commands.
236
236
237 This is the primary interface for requesting that wire protocol commands
237 This is the primary interface for requesting that wire protocol commands
238 be executed. Instances of this interface are active in a context manager
238 be executed. Instances of this interface are active in a context manager
239 and have a well-defined lifetime. When the context manager exits, all
239 and have a well-defined lifetime. When the context manager exits, all
240 outstanding requests are waited on.
240 outstanding requests are waited on.
241 """
241 """
242
242
243 def callcommand(name, args):
243 def callcommand(name, args):
244 """Request that a named command be executed.
244 """Request that a named command be executed.
245
245
246 Receives the command name and a dictionary of command arguments.
246 Receives the command name and a dictionary of command arguments.
247
247
248 Returns a ``concurrent.futures.Future`` that will resolve to the
248 Returns a ``concurrent.futures.Future`` that will resolve to the
249 result of that command request. That exact value is left up to
249 result of that command request. That exact value is left up to
250 the implementation and possibly varies by command.
250 the implementation and possibly varies by command.
251
251
252 Not all commands can coexist with other commands in an executor
252 Not all commands can coexist with other commands in an executor
253 instance: it depends on the underlying wire protocol transport being
253 instance: it depends on the underlying wire protocol transport being
254 used and the command itself.
254 used and the command itself.
255
255
256 Implementations MAY call ``sendcommands()`` automatically if the
256 Implementations MAY call ``sendcommands()`` automatically if the
257 requested command cannot coexist with other commands in this executor.
257 requested command cannot coexist with other commands in this executor.
258
258
259 Implementations MAY call ``sendcommands()`` automatically when the
259 Implementations MAY call ``sendcommands()`` automatically when the
260 future's ``result()`` is called. So, consumers using multiple
260 future's ``result()`` is called. So, consumers using multiple
261 commands with an executor MUST ensure that ``result()`` is not called
261 commands with an executor MUST ensure that ``result()`` is not called
262 until all command requests have been issued.
262 until all command requests have been issued.
263 """
263 """
264
264
265 def sendcommands():
265 def sendcommands():
266 """Trigger submission of queued command requests.
266 """Trigger submission of queued command requests.
267
267
268 Not all transports submit commands as soon as they are requested to
268 Not all transports submit commands as soon as they are requested to
269 run. When called, this method forces queued command requests to be
269 run. When called, this method forces queued command requests to be
270 issued. It will no-op if all commands have already been sent.
270 issued. It will no-op if all commands have already been sent.
271
271
272 When called, no more new commands may be issued with this executor.
272 When called, no more new commands may be issued with this executor.
273 """
273 """
274
274
275 def close():
275 def close():
276 """Signal that this command request is finished.
276 """Signal that this command request is finished.
277
277
278 When called, no more new commands may be issued. All outstanding
278 When called, no more new commands may be issued. All outstanding
279 commands that have previously been issued are waited on before
279 commands that have previously been issued are waited on before
280 returning. This not only includes waiting for the futures to resolve,
280 returning. This not only includes waiting for the futures to resolve,
281 but also waiting for all response data to arrive. In other words,
281 but also waiting for all response data to arrive. In other words,
282 calling this waits for all on-wire state for issued command requests
282 calling this waits for all on-wire state for issued command requests
283 to finish.
283 to finish.
284
284
285 When used as a context manager, this method is called when exiting the
285 When used as a context manager, this method is called when exiting the
286 context manager.
286 context manager.
287
287
288 This method may call ``sendcommands()`` if there are buffered commands.
288 This method may call ``sendcommands()`` if there are buffered commands.
289 """
289 """
290
290
291 class ipeerrequests(interfaceutil.Interface):
291 class ipeerrequests(interfaceutil.Interface):
292 """Interface for executing commands on a peer."""
292 """Interface for executing commands on a peer."""
293
293
294 def commandexecutor():
294 def commandexecutor():
295 """A context manager that resolves to an ipeercommandexecutor.
295 """A context manager that resolves to an ipeercommandexecutor.
296
296
297 The object this resolves to can be used to issue command requests
297 The object this resolves to can be used to issue command requests
298 to the peer.
298 to the peer.
299
299
300 Callers should call its ``callcommand`` method to issue command
300 Callers should call its ``callcommand`` method to issue command
301 requests.
301 requests.
302
302
303 A new executor should be obtained for each distinct set of commands
303 A new executor should be obtained for each distinct set of commands
304 (possibly just a single command) that the consumer wants to execute
304 (possibly just a single command) that the consumer wants to execute
305 as part of a single operation or round trip. This is because some
305 as part of a single operation or round trip. This is because some
306 peers are half-duplex and/or don't support persistent connections.
306 peers are half-duplex and/or don't support persistent connections.
307 For example, with HTTP peers, commands sent to an executor represent
307 For example, with HTTP peers, commands sent to an executor represent
308 a single HTTP request. While some peers may support multiple command
308 a single HTTP request. While some peers may support multiple command
309 sends over the wire per executor, consumers need to code to the least
309 sends over the wire per executor, consumers need to code to the least
310 capable peer. So it should be assumed that command executors buffer
310 capable peer. So it should be assumed that command executors buffer
311 called commands until they are told to send them and that each
311 called commands until they are told to send them and that each
312 command executor could result in a new connection or wire-level request
312 command executor could result in a new connection or wire-level request
313 being issued.
313 being issued.
314 """
314 """
315
315
316 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
316 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
317 """Unified interface for peer repositories.
317 """Unified interface for peer repositories.
318
318
319 All peer instances must conform to this interface.
319 All peer instances must conform to this interface.
320 """
320 """
321
321
322 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
322 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
323 """Unified peer interface for wire protocol version 2 peers."""
323 """Unified peer interface for wire protocol version 2 peers."""
324
324
325 apidescriptor = interfaceutil.Attribute(
325 apidescriptor = interfaceutil.Attribute(
326 """Data structure holding description of server API.""")
326 """Data structure holding description of server API.""")
327
327
328 @interfaceutil.implementer(ipeerbase)
328 @interfaceutil.implementer(ipeerbase)
329 class peer(object):
329 class peer(object):
330 """Base class for peer repositories."""
330 """Base class for peer repositories."""
331
331
332 def capable(self, name):
332 def capable(self, name):
333 caps = self.capabilities()
333 caps = self.capabilities()
334 if name in caps:
334 if name in caps:
335 return True
335 return True
336
336
337 name = '%s=' % name
337 name = '%s=' % name
338 for cap in caps:
338 for cap in caps:
339 if cap.startswith(name):
339 if cap.startswith(name):
340 return cap[len(name):]
340 return cap[len(name):]
341
341
342 return False
342 return False
343
343
344 def requirecap(self, name, purpose):
344 def requirecap(self, name, purpose):
345 if self.capable(name):
345 if self.capable(name):
346 return
346 return
347
347
348 raise error.CapabilityError(
348 raise error.CapabilityError(
349 _('cannot %s; remote repository does not support the %r '
349 _('cannot %s; remote repository does not support the %r '
350 'capability') % (purpose, name))
350 'capability') % (purpose, name))
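# Sketch of consuming ``capable()``: a bare capability yields True, a
# ``name=value`` capability yields the value string, and an absent
# capability yields False. The capability name is an assumption.
def _example_capable(peer):
    bundle2 = peer.capable('bundle2')
    if bundle2 is False:
        return None        # capability absent
    if bundle2 is True:
        return ''          # present, no '=value' part
    return bundle2         # the text after 'bundle2='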
351
351
352 class iverifyproblem(interfaceutil.Interface):
352 class iverifyproblem(interfaceutil.Interface):
353 """Represents a problem with the integrity of the repository.
353 """Represents a problem with the integrity of the repository.
354
354
355 Instances of this interface are emitted to describe an integrity issue
355 Instances of this interface are emitted to describe an integrity issue
356 with a repository (e.g. corrupt storage, missing data, etc).
356 with a repository (e.g. corrupt storage, missing data, etc).
357
357
358 Instances are essentially messages associated with severity.
358 Instances are essentially messages associated with severity.
359 """
359 """
360 warning = interfaceutil.Attribute(
360 warning = interfaceutil.Attribute(
361 """Message indicating a non-fatal problem.""")
361 """Message indicating a non-fatal problem.""")
362
362
363 error = interfaceutil.Attribute(
363 error = interfaceutil.Attribute(
364 """Message indicating a fatal problem.""")
364 """Message indicating a fatal problem.""")
365
365
366 node = interfaceutil.Attribute(
366 node = interfaceutil.Attribute(
367 """Revision encountering the problem.
367 """Revision encountering the problem.
368
368
369 ``None`` means the problem doesn't apply to a single revision.
369 ``None`` means the problem doesn't apply to a single revision.
370 """)
370 """)
371
371
372 class irevisiondelta(interfaceutil.Interface):
372 class irevisiondelta(interfaceutil.Interface):
373 """Represents a delta between one revision and another.
373 """Represents a delta between one revision and another.
374
374
375 Instances convey enough information to allow a revision to be exchanged
375 Instances convey enough information to allow a revision to be exchanged
376 with another repository.
376 with another repository.
377
377
378 Instances represent the fulltext revision data or a delta against
378 Instances represent the fulltext revision data or a delta against
379 another revision. Therefore the ``revision`` and ``delta`` attributes
379 another revision. Therefore the ``revision`` and ``delta`` attributes
380 are mutually exclusive.
380 are mutually exclusive.
381
381
382 Typically used for changegroup generation.
382 Typically used for changegroup generation.
383 """
383 """
384
384
385 node = interfaceutil.Attribute(
385 node = interfaceutil.Attribute(
386 """20 byte node of this revision.""")
386 """20 byte node of this revision.""")
387
387
388 p1node = interfaceutil.Attribute(
388 p1node = interfaceutil.Attribute(
389 """20 byte node of 1st parent of this revision.""")
389 """20 byte node of 1st parent of this revision.""")
390
390
391 p2node = interfaceutil.Attribute(
391 p2node = interfaceutil.Attribute(
392 """20 byte node of 2nd parent of this revision.""")
392 """20 byte node of 2nd parent of this revision.""")
393
393
394 linknode = interfaceutil.Attribute(
394 linknode = interfaceutil.Attribute(
395 """20 byte node of the changelog revision this node is linked to.""")
395 """20 byte node of the changelog revision this node is linked to.""")
396
396
397 flags = interfaceutil.Attribute(
397 flags = interfaceutil.Attribute(
398 """2 bytes of integer flags that apply to this revision.
398 """2 bytes of integer flags that apply to this revision.
399
399
400 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
400 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
401 """)
401 """)
402
402
403 basenode = interfaceutil.Attribute(
403 basenode = interfaceutil.Attribute(
404 """20 byte node of the revision this data is a delta against.
404 """20 byte node of the revision this data is a delta against.
405
405
406 ``nullid`` indicates that the revision is a full revision and not
406 ``nullid`` indicates that the revision is a full revision and not
407 a delta.
407 a delta.
408 """)
408 """)
409
409
410 baserevisionsize = interfaceutil.Attribute(
410 baserevisionsize = interfaceutil.Attribute(
411 """Size of base revision this delta is against.
411 """Size of base revision this delta is against.
412
412
413 May be ``None`` if ``basenode`` is ``nullid``.
413 May be ``None`` if ``basenode`` is ``nullid``.
414 """)
414 """)
415
415
416 revision = interfaceutil.Attribute(
416 revision = interfaceutil.Attribute(
417 """Raw fulltext of revision data for this node.""")
417 """Raw fulltext of revision data for this node.""")
418
418
419 delta = interfaceutil.Attribute(
419 delta = interfaceutil.Attribute(
420 """Delta between ``basenode`` and ``node``.
420 """Delta between ``basenode`` and ``node``.
421
421
422 Stored in the bdiff delta format.
422 Stored in the bdiff delta format.
423 """)
423 """)
424
424
425 class ifilerevisionssequence(interfaceutil.Interface):
425 class ifilerevisionssequence(interfaceutil.Interface):
426 """Contains index data for all revisions of a file.
426 """Contains index data for all revisions of a file.
427
427
428 Types implementing this behave like lists of tuples. The index
428 Types implementing this behave like lists of tuples. The index
429 in the list corresponds to the revision number. The values contain
429 in the list corresponds to the revision number. The values contain
430 index metadata.
430 index metadata.
431
431
432 The *null* revision (revision number -1) is always the last item
432 The *null* revision (revision number -1) is always the last item
433 in the index.
433 in the index.
434 """
434 """
435
435
436 def __len__():
436 def __len__():
437 """The total number of revisions."""
437 """The total number of revisions."""
438
438
439 def __getitem__(rev):
439 def __getitem__(rev):
440 """Returns the object having a specific revision number.
440 """Returns the object having a specific revision number.
441
441
442 Returns an 8-tuple with the following fields:
442 Returns an 8-tuple with the following fields:
443
443
444 offset+flags
444 offset+flags
445 Contains the offset and flags for the revision. 64-bit unsigned
445 Contains the offset and flags for the revision. 64-bit unsigned
446 integer where the first 6 bytes are the offset and the next 2 bytes
446 integer where the first 6 bytes are the offset and the next 2 bytes
447 are flags. The offset can be 0 if it is not used by the store.
447 are flags. The offset can be 0 if it is not used by the store.
448 compressed size
448 compressed size
449 Size of the revision data in the store. It can be 0 if it isn't
449 Size of the revision data in the store. It can be 0 if it isn't
450 needed by the store.
450 needed by the store.
451 uncompressed size
451 uncompressed size
452 Fulltext size. It can be 0 if it isn't needed by the store.
452 Fulltext size. It can be 0 if it isn't needed by the store.
453 base revision
453 base revision
454 Revision number of revision the delta for storage is encoded
454 Revision number of revision the delta for storage is encoded
455 against. -1 indicates not encoded against a base revision.
455 against. -1 indicates not encoded against a base revision.
456 link revision
456 link revision
457 Revision number of changelog revision this entry is related to.
457 Revision number of changelog revision this entry is related to.
458 p1 revision
458 p1 revision
459 Revision number of 1st parent. -1 if no 1st parent.
459 Revision number of 1st parent. -1 if no 1st parent.
460 p2 revision
460 p2 revision
461 Revision number of 2nd parent. -1 if no 2nd parent.
461 Revision number of 2nd parent. -1 if no 2nd parent.
462 node
462 node
463 Binary node value for this revision number.
463 Binary node value for this revision number.
464
464
465 Negative values should index off the end of the sequence. ``-1``
465 Negative values should index off the end of the sequence. ``-1``
466 should return the null revision. ``-2`` should return the most
466 should return the null revision. ``-2`` should return the most
467 recent revision.
467 recent revision.
468 """
468 """
469
469
470 def __contains__(rev):
470 def __contains__(rev):
471 """Whether a revision number exists."""
471 """Whether a revision number exists."""
472
472
473 def insert(i, entry):
473 def insert(i, entry):
474 """Add an item to the index at specific revision."""
474 """Add an item to the index at specific revision."""
475
475
476 class ifileindex(interfaceutil.Interface):
476 class ifileindex(interfaceutil.Interface):
477 """Storage interface for index data of a single file.
477 """Storage interface for index data of a single file.
478
478
479 File storage data is divided into index metadata and data storage.
479 File storage data is divided into index metadata and data storage.
480 This interface defines the index portion of the interface.
480 This interface defines the index portion of the interface.
481
481
482 The index logically consists of:
482 The index logically consists of:
483
483
484 * A mapping between revision numbers and nodes.
484 * A mapping between revision numbers and nodes.
485 * DAG data (storing and querying the relationship between nodes).
485 * DAG data (storing and querying the relationship between nodes).
486 * Metadata to facilitate storage.
486 * Metadata to facilitate storage.
487 """
487 """
488 def __len__():
488 def __len__():
489 """Obtain the number of revisions stored for this file."""
489 """Obtain the number of revisions stored for this file."""
490
490
491 def __iter__():
491 def __iter__():
492 """Iterate over revision numbers for this file."""
492 """Iterate over revision numbers for this file."""
493
493
494 def hasnode(node):
494 def hasnode(node):
495 """Returns a bool indicating if a node is known to this store.
495 """Returns a bool indicating if a node is known to this store.
496
496
497 Implementations must only return True for full, binary node values:
497 Implementations must only return True for full, binary node values:
498 hex nodes, revision numbers, and partial node matches must be
498 hex nodes, revision numbers, and partial node matches must be
499 rejected.
499 rejected.
500
500
501 The null node is never present.
501 The null node is never present.
502 """
502 """
503
503
504 def revs(start=0, stop=None):
504 def revs(start=0, stop=None):
505 """Iterate over revision numbers for this file, with control."""
505 """Iterate over revision numbers for this file, with control."""
506
506
507 def parents(node):
507 def parents(node):
508 """Returns a 2-tuple of parent nodes for a revision.
508 """Returns a 2-tuple of parent nodes for a revision.
509
509
510 Values will be ``nullid`` if the parent is empty.
510 Values will be ``nullid`` if the parent is empty.
511 """
511 """
512
512
513 def parentrevs(rev):
513 def parentrevs(rev):
514 """Like parents() but operates on revision numbers."""
514 """Like parents() but operates on revision numbers."""
515
515
516 def rev(node):
516 def rev(node):
517 """Obtain the revision number given a node.
517 """Obtain the revision number given a node.
518
518
519 Raises ``error.LookupError`` if the node is not known.
519 Raises ``error.LookupError`` if the node is not known.
520 """
520 """
521
521
522 def node(rev):
522 def node(rev):
523 """Obtain the node value given a revision number.
523 """Obtain the node value given a revision number.
524
524
525 Raises ``IndexError`` if the node is not known.
525 Raises ``IndexError`` if the node is not known.
526 """
526 """
527
527
528 def lookup(node):
528 def lookup(node):
529 """Attempt to resolve a value to a node.
529 """Attempt to resolve a value to a node.
530
530
531 Value can be a binary node, hex node, revision number, or a string
531 Value can be a binary node, hex node, revision number, or a string
532 that can be converted to an integer.
532 that can be converted to an integer.
533
533
534 Raises ``error.LookupError`` if a node could not be resolved.
534 Raises ``error.LookupError`` if a node could not be resolved.
535 """
535 """
536
536
537 def linkrev(rev):
537 def linkrev(rev):
538 """Obtain the changeset revision number a revision is linked to."""
538 """Obtain the changeset revision number a revision is linked to."""
539
539
540 def iscensored(rev):
540 def iscensored(rev):
541 """Return whether a revision's content has been censored."""
541 """Return whether a revision's content has been censored."""
542
542
543 def commonancestorsheads(node1, node2):
543 def commonancestorsheads(node1, node2):
544 """Obtain an iterable of nodes containing heads of common ancestors.
544 """Obtain an iterable of nodes containing heads of common ancestors.
545
545
546 See ``ancestor.commonancestorsheads()``.
546 See ``ancestor.commonancestorsheads()``.
547 """
547 """
548
548
549 def descendants(revs):
549 def descendants(revs):
550 """Obtain descendant revision numbers for a set of revision numbers.
550 """Obtain descendant revision numbers for a set of revision numbers.
551
551
552 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
552 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
553 """
553 """
554
554
555 def heads(start=None, stop=None):
555 def heads(start=None, stop=None):
556 """Obtain a list of nodes that are DAG heads, with control.
556 """Obtain a list of nodes that are DAG heads, with control.
557
557
558 The set of revisions examined can be limited by specifying
558 The set of revisions examined can be limited by specifying
559 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
559 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
560 iterable of nodes. DAG traversal starts at earlier revision
560 iterable of nodes. DAG traversal starts at earlier revision
561 ``start`` and iterates forward until any node in ``stop`` is
561 ``start`` and iterates forward until any node in ``stop`` is
562 encountered.
562 encountered.
563 """
563 """
564
564
565 def children(node):
565 def children(node):
566 """Obtain nodes that are children of a node.
566 """Obtain nodes that are children of a node.
567
567
568 Returns a list of nodes.
568 Returns a list of nodes.
569 """
569 """
570
570
571 class ifiledata(interfaceutil.Interface):
571 class ifiledata(interfaceutil.Interface):
572 """Storage interface for data storage of a specific file.
572 """Storage interface for data storage of a specific file.
573
573
574 This complements ``ifileindex`` and provides an interface for accessing
574 This complements ``ifileindex`` and provides an interface for accessing
575 data for a tracked file.
575 data for a tracked file.
576 """
576 """
577 def size(rev):
577 def size(rev):
578 """Obtain the fulltext size of file data.
578 """Obtain the fulltext size of file data.
579
579
580 Any metadata is excluded from size measurements.
580 Any metadata is excluded from size measurements.
581 """
581 """
582
582
583 def revision(node, raw=False):
583 def revision(node, raw=False):
584 """"Obtain fulltext data for a node.
584 """"Obtain fulltext data for a node.
585
585
586 By default, any storage transformations are applied before the data
586 By default, any storage transformations are applied before the data
587 is returned. If ``raw`` is True, non-raw storage transformations
587 is returned. If ``raw`` is True, non-raw storage transformations
588 are not applied.
588 are not applied.
589
589
590 The fulltext data may contain a header containing metadata. Most
590 The fulltext data may contain a header containing metadata. Most
591 consumers should use ``read()`` to obtain the actual file data.
591 consumers should use ``read()`` to obtain the actual file data.
592 """
592 """
593
593
594 def read(node):
594 def read(node):
595 """Resolve file fulltext data.
595 """Resolve file fulltext data.
596
596
597 This is similar to ``revision()`` except any metadata in the data
597 This is similar to ``revision()`` except any metadata in the data
598 headers is stripped.
598 headers is stripped.
599 """
599 """
600
600
601 def renamed(node):
601 def renamed(node):
602 """Obtain copy metadata for a node.
602 """Obtain copy metadata for a node.
603
603
604 Returns ``False`` if no copy metadata is stored or a 2-tuple of
604 Returns ``False`` if no copy metadata is stored or a 2-tuple of
605 (path, node) from which this revision was copied.
605 (path, node) from which this revision was copied.
606 """
606 """
607
607
608 def cmp(node, fulltext):
608 def cmp(node, fulltext):
609 """Compare fulltext to another revision.
609 """Compare fulltext to another revision.
610
610
611 Returns True if the fulltext is different from what is stored.
611 Returns True if the fulltext is different from what is stored.
612
612
613 This takes copy metadata into account.
613 This takes copy metadata into account.
614
614
615 TODO better document the copy metadata and censoring logic.
615 TODO better document the copy metadata and censoring logic.
616 """
616 """
617
617
618 def emitrevisions(nodes,
618 def emitrevisions(nodes,
619 nodesorder=None,
619 nodesorder=None,
620 revisiondata=False,
620 revisiondata=False,
621 assumehaveparentrevisions=False,
621 assumehaveparentrevisions=False,
622 deltamode=CG_DELTAMODE_STD):
622 deltamode=CG_DELTAMODE_STD):
623 """Produce ``irevisiondelta`` for revisions.
623 """Produce ``irevisiondelta`` for revisions.
624
624
625 Given an iterable of nodes, emits objects conforming to the
625 Given an iterable of nodes, emits objects conforming to the
626 ``irevisiondelta`` interface that describe revisions in storage.
626 ``irevisiondelta`` interface that describe revisions in storage.
627
627
628 This method is a generator.
628 This method is a generator.
629
629
630 The input nodes may be unordered. Implementations must ensure that a
630 The input nodes may be unordered. Implementations must ensure that a
631 node's parents are emitted before the node itself. Transitively, this
631 node's parents are emitted before the node itself. Transitively, this
632 means that a node may only be emitted once all its ancestors in
632 means that a node may only be emitted once all its ancestors in
633 ``nodes`` have also been emitted.
633 ``nodes`` have also been emitted.
634
634
635 By default, emits "index" data (the ``node``, ``p1node``, and
635 By default, emits "index" data (the ``node``, ``p1node``, and
636 ``p2node`` attributes). If ``revisiondata`` is set, revision data
636 ``p2node`` attributes). If ``revisiondata`` is set, revision data
637 will also be present on the emitted objects.
637 will also be present on the emitted objects.
638
638
639 With default argument values, implementations can choose to emit
639 With default argument values, implementations can choose to emit
640 either fulltext revision data or a delta. When emitting deltas,
640 either fulltext revision data or a delta. When emitting deltas,
641 implementations must consider whether the delta's base revision
641 implementations must consider whether the delta's base revision
642 fulltext is available to the receiver.
642 fulltext is available to the receiver.
643
643
644 The base revision fulltext is guaranteed to be available if any of
644 The base revision fulltext is guaranteed to be available if any of
645 the following are met:
645 the following are met:
646
646
647 * Its fulltext revision was emitted by this method call.
647 * Its fulltext revision was emitted by this method call.
648 * A delta for that revision was emitted by this method call.
648 * A delta for that revision was emitted by this method call.
649 * ``assumehaveparentrevisions`` is True and the base revision is a
649 * ``assumehaveparentrevisions`` is True and the base revision is a
650 parent of the node.
650 parent of the node.
651
651
652 ``nodesorder`` can be used to control the order that revisions are
652 ``nodesorder`` can be used to control the order that revisions are
653 emitted. By default, revisions can be reordered as long as they are
653 emitted. By default, revisions can be reordered as long as they are
654 in DAG topological order (see above). If the value is ``nodes``,
654 in DAG topological order (see above). If the value is ``nodes``,
655 the iteration order from ``nodes`` should be used. If the value is
655 the iteration order from ``nodes`` should be used. If the value is
656 ``storage``, then the native order from the backing storage layer
656 ``storage``, then the native order from the backing storage layer
657 is used. (Not all storage layers will have strong ordering, and the
657 is used. (Not all storage layers will have strong ordering, and the
658 behavior of this mode is storage-dependent.) ``nodes`` ordering can force
658 behavior of this mode is storage-dependent.) ``nodes`` ordering can force
659 revisions to be emitted before their ancestors, so consumers should
659 revisions to be emitted before their ancestors, so consumers should
660 use it with care.
660 use it with care.
661
661
662 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
662 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
663 be set and it is the caller's responsibility to resolve it, if needed.
663 be set and it is the caller's responsibility to resolve it, if needed.
664
664
665 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
665 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
666 all revision data should be emitted as deltas against the revision
666 all revision data should be emitted as deltas against the revision
667 emitted just prior. The initial revision should be a delta against its
667 emitted just prior. The initial revision should be a delta against its
668 1st parent.
668 1st parent.
669 """
669 """
670
670
671 class ifilemutation(interfaceutil.Interface):
671 class ifilemutation(interfaceutil.Interface):
672 """Storage interface for mutation events of a tracked file."""
672 """Storage interface for mutation events of a tracked file."""
673
673
674 def add(filedata, meta, transaction, linkrev, p1, p2):
674 def add(filedata, meta, transaction, linkrev, p1, p2):
675 """Add a new revision to the store.
675 """Add a new revision to the store.
676
676
677 Takes file data, dictionary of metadata, a transaction, linkrev,
677 Takes file data, dictionary of metadata, a transaction, linkrev,
678 and parent nodes.
678 and parent nodes.
679
679
680 Returns the node that was added.
680 Returns the node that was added.
681
681
682 May no-op if a revision matching the supplied data is already stored.
682 May no-op if a revision matching the supplied data is already stored.
683 """
683 """
684
684
685 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
685 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
686 flags=0, cachedelta=None):
686 flags=0, cachedelta=None):
687 """Add a new revision to the store.
687 """Add a new revision to the store.
688
688
689 This is similar to ``add()`` except it operates at a lower level.
689 This is similar to ``add()`` except it operates at a lower level.
690
690
691 The data passed in already contains a metadata header, if any.
691 The data passed in already contains a metadata header, if any.
692
692
693 ``node`` and ``flags`` can be used to define the expected node and
693 ``node`` and ``flags`` can be used to define the expected node and
694 the flags to use with storage. ``flags`` is a bitwise value composed
694 the flags to use with storage. ``flags`` is a bitwise value composed
695 of the various ``REVISION_FLAG_*`` constants.
695 of the various ``REVISION_FLAG_*`` constants.
696
696
697 ``add()`` is usually called when adding files from e.g. the working
697 ``add()`` is usually called when adding files from e.g. the working
698 directory. ``addrevision()`` is often called by ``add()`` and for
698 directory. ``addrevision()`` is often called by ``add()`` and for
699 scenarios where revision data has already been computed, such as when
699 scenarios where revision data has already been computed, such as when
700 applying raw data from a peer repo.
700 applying raw data from a peer repo.
701 """
701 """
702
702
703 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
703 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
704 maybemissingparents=False):
704 maybemissingparents=False):
705 """Process a series of deltas for storage.
705 """Process a series of deltas for storage.
706
706
707 ``deltas`` is an iterable of 7-tuples of
707 ``deltas`` is an iterable of 7-tuples of
708 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
708 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
709 to add.
709 to add.
710
710
711 The ``delta`` field contains ``mpatch`` data to apply to a base
711 The ``delta`` field contains ``mpatch`` data to apply to a base
712 revision, identified by ``deltabase``. The base node can be
712 revision, identified by ``deltabase``. The base node can be
713 ``nullid``, in which case the header from the delta can be ignored
713 ``nullid``, in which case the header from the delta can be ignored
714 and the delta used as the fulltext.
714 and the delta used as the fulltext.
715
715
716 ``addrevisioncb`` should be called for each node as it is committed.
716 ``addrevisioncb`` should be called for each node as it is committed.
717
717
718 ``maybemissingparents`` is a bool indicating whether the incoming
718 ``maybemissingparents`` is a bool indicating whether the incoming
719 data may reference parents/ancestor revisions that aren't present.
719 data may reference parents/ancestor revisions that aren't present.
720 This flag is set when receiving data into a "shallow" store that
720 This flag is set when receiving data into a "shallow" store that
721 doesn't hold all history.
721 doesn't hold all history.
722
722
723 Returns a list of nodes that were processed. A node will be in the list
723 Returns a list of nodes that were processed. A node will be in the list
724 even if it existed in the store previously.
724 even if it existed in the store previously.
725 """
725 """
726
726
727 def censorrevision(tr, node, tombstone=b''):
727 def censorrevision(tr, node, tombstone=b''):
728 """Remove the content of a single revision.
728 """Remove the content of a single revision.
729
729
730 The specified ``node`` will have its content purged from storage.
730 The specified ``node`` will have its content purged from storage.
731 Future attempts to access the revision data for this node will
731 Future attempts to access the revision data for this node will
732 result in failure.
732 result in failure.
733
733
734 A ``tombstone`` message can optionally be stored. This message may be
734 A ``tombstone`` message can optionally be stored. This message may be
735 displayed to users when they attempt to access the missing revision
735 displayed to users when they attempt to access the missing revision
736 data.
736 data.
737
737
738 Storage backends may have stored deltas against the previous content
738 Storage backends may have stored deltas against the previous content
739 in this revision. As part of censoring a revision, these storage
739 in this revision. As part of censoring a revision, these storage
740 backends are expected to rewrite any internally stored deltas such
740 backends are expected to rewrite any internally stored deltas such
741 that they no longer reference the deleted content.
741 that they no longer reference the deleted content.
742 """
742 """
743
743
744 def getstrippoint(minlink):
744 def getstrippoint(minlink):
745 """Find the minimum revision that must be stripped to strip a linkrev.
745 """Find the minimum revision that must be stripped to strip a linkrev.
746
746
747 Returns a 2-tuple containing the minimum revision number and a set
747 Returns a 2-tuple containing the minimum revision number and a set
748 of all revision numbers that would be broken by this strip.
748 of all revision numbers that would be broken by this strip.
749
749
750 TODO this is highly revlog centric and should be abstracted into
750 TODO this is highly revlog centric and should be abstracted into
751 a higher-level deletion API. ``repair.strip()`` relies on this.
751 a higher-level deletion API. ``repair.strip()`` relies on this.
752 """
752 """
753
753
754 def strip(minlink, transaction):
754 def strip(minlink, transaction):
755 """Remove storage of items starting at a linkrev.
755 """Remove storage of items starting at a linkrev.
756
756
757 This uses ``getstrippoint()`` to determine the first node to remove.
757 This uses ``getstrippoint()`` to determine the first node to remove.
758 Then it effectively truncates storage for all revisions after that.
758 Then it effectively truncates storage for all revisions after that.
759
759
760 TODO this is highly revlog centric and should be abstracted into a
760 TODO this is highly revlog centric and should be abstracted into a
761 higher-level deletion API.
761 higher-level deletion API.
762 """
762 """
763
763
764 class ifilestorage(ifileindex, ifiledata, ifilemutation):
764 class ifilestorage(ifileindex, ifiledata, ifilemutation):
765 """Complete storage interface for a single tracked file."""
765 """Complete storage interface for a single tracked file."""
766
766
767 def files():
767 def files():
768 """Obtain paths that are backing storage for this file.
768 """Obtain paths that are backing storage for this file.
769
769
770 TODO this is used heavily by verify code and there should probably
770 TODO this is used heavily by verify code and there should probably
771 be a better API for that.
771 be a better API for that.
772 """
772 """
773
773
774 def storageinfo(exclusivefiles=False, sharedfiles=False,
774 def storageinfo(exclusivefiles=False, sharedfiles=False,
775 revisionscount=False, trackedsize=False,
775 revisionscount=False, trackedsize=False,
776 storedsize=False):
776 storedsize=False):
777 """Obtain information about storage for this file's data.
777 """Obtain information about storage for this file's data.
778
778
779 Returns a dict describing storage for this tracked path. The keys
779 Returns a dict describing storage for this tracked path. The keys
780 in the dict map to arguments of the same name. The arguments are bools
780 in the dict map to arguments of the same name. The arguments are bools
781 indicating whether to calculate and obtain that data.
781 indicating whether to calculate and obtain that data.
782
782
783 exclusivefiles
783 exclusivefiles
784 Iterable of (vfs, path) describing files that are exclusively
784 Iterable of (vfs, path) describing files that are exclusively
785 used to back storage for this tracked path.
785 used to back storage for this tracked path.
786
786
787 sharedfiles
787 sharedfiles
788 Iterable of (vfs, path) describing files that are used to back
788 Iterable of (vfs, path) describing files that are used to back
789 storage for this tracked path. Those files may also provide storage
789 storage for this tracked path. Those files may also provide storage
790 for other stored entities.
790 for other stored entities.
791
791
792 revisionscount
792 revisionscount
793 Number of revisions available for retrieval.
793 Number of revisions available for retrieval.
794
794
795 trackedsize
795 trackedsize
796 Total size in bytes of all tracked revisions. This is a sum of the
796 Total size in bytes of all tracked revisions. This is a sum of the
797 length of the fulltext of all revisions.
797 length of the fulltext of all revisions.
798
798
799 storedsize
799 storedsize
800 Total size in bytes used to store data for all tracked revisions.
800 Total size in bytes used to store data for all tracked revisions.
801 This is commonly less than ``trackedsize`` due to internal usage
801 This is commonly less than ``trackedsize`` due to internal usage
802 of deltas rather than fulltext revisions.
802 of deltas rather than fulltext revisions.
803
803
804 Not all storage backends may support all queries or have a reasonable
804 Not all storage backends may support all queries or have a reasonable
805 value to use. In that case, the value should be set to ``None`` and
805 value to use. In that case, the value should be set to ``None`` and
806 callers are expected to handle this special value.
806 callers are expected to handle this special value.
807 """
807 """
808
808
809 def verifyintegrity(state):
809 def verifyintegrity(state):
810 """Verifies the integrity of file storage.
810 """Verifies the integrity of file storage.
811
811
812 ``state`` is a dict holding state of the verifier process. It can be
812 ``state`` is a dict holding state of the verifier process. It can be
813 used to communicate data between invocations of multiple storage
813 used to communicate data between invocations of multiple storage
814 primitives.
814 primitives.
815
815
816 If individual revisions cannot have their revision content resolved,
816 If individual revisions cannot have their revision content resolved,
817 the method is expected to set the ``skipread`` key to a set of nodes
817 the method is expected to set the ``skipread`` key to a set of nodes
818 that encountered problems.
818 that encountered problems.
819
819
820 The method yields objects conforming to the ``iverifyproblem``
820 The method yields objects conforming to the ``iverifyproblem``
821 interface.
821 interface.
822 """
822 """
823
823
824 class idirs(interfaceutil.Interface):
824 class idirs(interfaceutil.Interface):
825 """Interface representing a collection of directories from paths.
825 """Interface representing a collection of directories from paths.
826
826
827 This interface is essentially a derived data structure representing
827 This interface is essentially a derived data structure representing
828 directories from a collection of paths.
828 directories from a collection of paths.
829 """
829 """
830
830
831 def addpath(path):
831 def addpath(path):
832 """Add a path to the collection.
832 """Add a path to the collection.
833
833
834 All directories in the path will be added to the collection.
834 All directories in the path will be added to the collection.
835 """
835 """
836
836
837 def delpath(path):
837 def delpath(path):
838 """Remove a path from the collection.
838 """Remove a path from the collection.
839
839
840 If the removal was the last path in a particular directory, the
840 If the removal was the last path in a particular directory, the
841 directory is removed from the collection.
841 directory is removed from the collection.
842 """
842 """
843
843
844 def __iter__():
844 def __iter__():
845 """Iterate over the directories in this collection of paths."""
845 """Iterate over the directories in this collection of paths."""
846
846
847 def __contains__(path):
847 def __contains__(path):
848 """Whether a specific directory is in this collection."""
848 """Whether a specific directory is in this collection."""
849
849
850 class imanifestdict(interfaceutil.Interface):
850 class imanifestdict(interfaceutil.Interface):
851 """Interface representing a manifest data structure.
851 """Interface representing a manifest data structure.
852
852
853 A manifest is effectively a dict mapping paths to entries. Each entry
853 A manifest is effectively a dict mapping paths to entries. Each entry
854 consists of a binary node and extra flags affecting that entry.
854 consists of a binary node and extra flags affecting that entry.
855 """
855 """
856
856
857 def __getitem__(path):
857 def __getitem__(path):
858 """Returns the binary node value for a path in the manifest.
858 """Returns the binary node value for a path in the manifest.
859
859
860 Raises ``KeyError`` if the path does not exist in the manifest.
860 Raises ``KeyError`` if the path does not exist in the manifest.
861
861
862 Equivalent to ``self.find(path)[0]``.
862 Equivalent to ``self.find(path)[0]``.
863 """
863 """
864
864
865 def find(path):
865 def find(path):
866 """Returns the entry for a path in the manifest.
866 """Returns the entry for a path in the manifest.
867
867
868 Returns a 2-tuple of (node, flags).
868 Returns a 2-tuple of (node, flags).
869
869
870 Raises ``KeyError`` if the path does not exist in the manifest.
870 Raises ``KeyError`` if the path does not exist in the manifest.
871 """
871 """
872
872
873 def __len__():
873 def __len__():
874 """Return the number of entries in the manifest."""
874 """Return the number of entries in the manifest."""
875
875
876 def __nonzero__():
876 def __nonzero__():
877 """Returns True if the manifest has entries, False otherwise."""
877 """Returns True if the manifest has entries, False otherwise."""
878
878
879 __bool__ = __nonzero__
879 __bool__ = __nonzero__
880
880
881 def __setitem__(path, node):
881 def __setitem__(path, node):
882 """Define the node value for a path in the manifest.
882 """Define the node value for a path in the manifest.
883
883
884 If the path is already in the manifest, its flags will be copied to
884 If the path is already in the manifest, its flags will be copied to
885 the new entry.
885 the new entry.
886 """
886 """
887
887
888 def __contains__(path):
888 def __contains__(path):
889 """Whether a path exists in the manifest."""
889 """Whether a path exists in the manifest."""
890
890
891 def __delitem__(path):
891 def __delitem__(path):
892 """Remove a path from the manifest.
892 """Remove a path from the manifest.
893
893
894 Raises ``KeyError`` if the path is not in the manifest.
894 Raises ``KeyError`` if the path is not in the manifest.
895 """
895 """
896
896
897 def __iter__():
897 def __iter__():
898 """Iterate over paths in the manifest."""
898 """Iterate over paths in the manifest."""
899
899
900 def iterkeys():
900 def iterkeys():
901 """Iterate over paths in the manifest."""
901 """Iterate over paths in the manifest."""
902
902
903 def keys():
903 def keys():
904 """Obtain a list of paths in the manifest."""
904 """Obtain a list of paths in the manifest."""
905
905
906 def filesnotin(other, match=None):
906 def filesnotin(other, match=None):
907 """Obtain the set of paths in this manifest but not in another.
907 """Obtain the set of paths in this manifest but not in another.
908
908
909 ``match`` is an optional matcher function to be applied to both
909 ``match`` is an optional matcher function to be applied to both
910 manifests.
910 manifests.
911
911
912 Returns a set of paths.
912 Returns a set of paths.
913 """
913 """
914
914
915 def dirs():
915 def dirs():
916 """Returns an object implementing the ``idirs`` interface."""
916 """Returns an object implementing the ``idirs`` interface."""
917
917
918 def hasdir(dir):
918 def hasdir(dir):
919 """Returns a bool indicating if a directory is in this manifest."""
919 """Returns a bool indicating if a directory is in this manifest."""
920
920
921 def matches(match):
921 def matches(match):
922 """Generate a new manifest filtered through a matcher.
922 """Generate a new manifest filtered through a matcher.
923
923
924 Returns an object conforming to the ``imanifestdict`` interface.
924 Returns an object conforming to the ``imanifestdict`` interface.
925 """
925 """
926
926
927 def walk(match):
927 def walk(match):
928 """Generator of paths in manifest satisfying a matcher.
928 """Generator of paths in manifest satisfying a matcher.
929
929
930 This is equivalent to ``self.matches(match).iterkeys()`` except a new
930 This is equivalent to ``self.matches(match).iterkeys()`` except a new
931 manifest object is not created.
931 manifest object is not created.
932
932
933 If the matcher has explicit files listed and they don't exist in
933 If the matcher has explicit files listed and they don't exist in
934 the manifest, ``match.bad()`` is called for each missing file.
934 the manifest, ``match.bad()`` is called for each missing file.
935 """
935 """
936
936
937 def diff(other, match=None, clean=False):
937 def diff(other, match=None, clean=False):
938 """Find differences between this manifest and another.
938 """Find differences between this manifest and another.
939
939
940 This manifest is compared to ``other``.
940 This manifest is compared to ``other``.
941
941
942 If ``match`` is provided, the two manifests are filtered against this
942 If ``match`` is provided, the two manifests are filtered against this
943 matcher and only entries satisfying the matcher are compared.
943 matcher and only entries satisfying the matcher are compared.
944
944
945 If ``clean`` is True, unchanged files are included in the returned
945 If ``clean`` is True, unchanged files are included in the returned
946 object.
946 object.
947
947
948 Returns a dict whose keys are paths and whose values are 2-tuples of 2-tuples of
948 Returns a dict whose keys are paths and whose values are 2-tuples of 2-tuples of
949 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
949 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
950 represents the node and flags for this manifest and ``(node2, flag2)``
950 represents the node and flags for this manifest and ``(node2, flag2)``
951 are the same for the other manifest.
951 are the same for the other manifest.
952 """
952 """
953
953
954 def setflag(path, flag):
954 def setflag(path, flag):
955 """Set the flag value for a given path.
955 """Set the flag value for a given path.
956
956
957 Raises ``KeyError`` if the path is not already in the manifest.
957 Raises ``KeyError`` if the path is not already in the manifest.
958 """
958 """
959
959
960 def get(path, default=None):
960 def get(path, default=None):
961 """Obtain the node value for a path or a default value if missing."""
961 """Obtain the node value for a path or a default value if missing."""
962
962
963 def flags(path, default=''):
963 def flags(path, default=''):
964 """Return the flags value for a path or a default value if missing."""
964 """Return the flags value for a path or a default value if missing."""
965
965
966 def copy():
966 def copy():
967 """Return a copy of this manifest."""
967 """Return a copy of this manifest."""
968
968
969 def items():
969 def items():
970 """Returns an iterable of (path, node) for items in this manifest."""
970 """Returns an iterable of (path, node) for items in this manifest."""
971
971
972 def iteritems():
972 def iteritems():
973 """Identical to items()."""
973 """Identical to items()."""
974
974
975 def iterentries():
975 def iterentries():
976 """Returns an iterable of (path, node, flags) for this manifest.
976 """Returns an iterable of (path, node, flags) for this manifest.
977
977
978 Similar to ``iteritems()`` except items are 3-tuples and include
978 Similar to ``iteritems()`` except items are 3-tuples and include
979 flags.
979 flags.
980 """
980 """
981
981
982 def text():
982 def text():
983 """Obtain the raw data representation for this manifest.
983 """Obtain the raw data representation for this manifest.
984
984
985 Result is used to create a manifest revision.
985 Result is used to create a manifest revision.
986 """
986 """
987
987
988 def fastdelta(base, changes):
988 def fastdelta(base, changes):
989 """Obtain a delta between this manifest and another given changes.
989 """Obtain a delta between this manifest and another given changes.
990
990
991 ``base`` is the raw data representation of another manifest.
991 ``base`` is the raw data representation of another manifest.
992
992
993 ``changes`` is an iterable of ``(path, to_delete)``.
993 ``changes`` is an iterable of ``(path, to_delete)``.
994
994
995 Returns a 2-tuple containing ``bytearray(self.text())`` and the
995 Returns a 2-tuple containing ``bytearray(self.text())`` and the
996 delta between ``base`` and this manifest.
996 delta between ``base`` and this manifest.
997 """
997 """
998
998
999 class imanifestrevisionbase(interfaceutil.Interface):
999 class imanifestrevisionbase(interfaceutil.Interface):
1000 """Base interface representing a single revision of a manifest.
1000 """Base interface representing a single revision of a manifest.
1001
1001
1002 Should not be used as a primary interface: should always be inherited
1002 Should not be used as a primary interface: should always be inherited
1003 as part of a larger interface.
1003 as part of a larger interface.
1004 """
1004 """
1005
1005
1006 def new():
1006 def new():
1007 """Obtain a new manifest instance.
1007 """Obtain a new manifest instance.
1008
1008
1009 Returns an object conforming to the ``imanifestrevisionwritable``
1009 Returns an object conforming to the ``imanifestrevisionwritable``
1010 interface. The instance will be associated with the same
1010 interface. The instance will be associated with the same
1011 ``imanifestlog`` collection as this instance.
1011 ``imanifestlog`` collection as this instance.
1012 """
1012 """
1013
1013
1014 def copy():
1014 def copy():
1015 """Obtain a copy of this manifest instance.
1015 """Obtain a copy of this manifest instance.
1016
1016
1017 Returns an object conforming to the ``imanifestrevisionwritable``
1017 Returns an object conforming to the ``imanifestrevisionwritable``
1018 interface. The instance will be associated with the same
1018 interface. The instance will be associated with the same
1019 ``imanifestlog`` collection as this instance.
1019 ``imanifestlog`` collection as this instance.
1020 """
1020 """
1021
1021
1022 def read():
1022 def read():
1023 """Obtain the parsed manifest data structure.
1023 """Obtain the parsed manifest data structure.
1024
1024
1025 The returned object conforms to the ``imanifestdict`` interface.
1025 The returned object conforms to the ``imanifestdict`` interface.
1026 """
1026 """
1027
1027
1028 class imanifestrevisionstored(imanifestrevisionbase):
1028 class imanifestrevisionstored(imanifestrevisionbase):
1029 """Interface representing a manifest revision committed to storage."""
1029 """Interface representing a manifest revision committed to storage."""
1030
1030
1031 def node():
1031 def node():
1032 """The binary node for this manifest."""
1032 """The binary node for this manifest."""
1033
1033
1034 parents = interfaceutil.Attribute(
1034 parents = interfaceutil.Attribute(
1035 """List of binary nodes that are parents for this manifest revision."""
1035 """List of binary nodes that are parents for this manifest revision."""
1036 )
1036 )
1037
1037
1038 def readdelta(shallow=False):
1038 def readdelta(shallow=False):
1039 """Obtain the manifest data structure representing changes from parent.
1039 """Obtain the manifest data structure representing changes from parent.
1040
1040
1041 This manifest is compared to its 1st parent. A new manifest representing
1041 This manifest is compared to its 1st parent. A new manifest representing
1042 those differences is constructed.
1042 those differences is constructed.
1043
1043
1044 The returned object conforms to the ``imanifestdict`` interface.
1044 The returned object conforms to the ``imanifestdict`` interface.
1045 """
1045 """
1046
1046
1047 def readfast(shallow=False):
1047 def readfast(shallow=False):
1048 """Calls either ``read()`` or ``readdelta()``.
1048 """Calls either ``read()`` or ``readdelta()``.
1049
1049
1050 The faster of the two options is called.
1050 The faster of the two options is called.
1051 """
1051 """
1052
1052
1053 def find(key):
1053 def find(key):
1054 """Calls self.read().find(key)``.
1054 """Calls self.read().find(key)``.
1055
1055
1056 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1056 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1057 """
1057 """
1058
1058
class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """

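# Illustrative sketch (assumptions noted inline): the typical write flow the
# interfaces above combine to support - take a writable copy of an existing
# manifest, mutate it, then commit it through ``write()``. Treating the
# parsed manifest as a mutable mapping of path -> 20-byte node follows
# ``imanifestdict``; ``tr``, the nodes, and the path are placeholders.
def _example_commitmanifest(mctx, tr, linkrev, p1node, p2node):
    wmctx = mctx.copy()              # an ``imanifestrevisionwritable``
    m = wmctx.read()                 # parsed, mutable manifest data
    m[b'added-file'] = b'\x00' * 20  # hypothetical new entry
    return wmctx.write(tr, linkrev, p1node, p2node,
                       added=[b'added-file'], removed=[])
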
class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """)

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """)

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """)

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """)

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """)

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """)

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(nodes,
                      nodesorder=None,
                      revisiondata=False,
                      assumehaveparentrevisions=False):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.revision(node, raw=True))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    def storageinfo(exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """

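# Illustrative sketch: a tiny consistency walk over an ``imanifeststorage``,
# relying only on behavior documented above. ``store`` is any conforming
# object; this is an example, not something verify actually runs.
def _example_walkstore(store):
    for r in store:
        node = store.node(r)
        # rev() and node() must round-trip for every known revision.
        assert store.rev(node) == r
        for p in store.parents(node):
            # parents() reports ``nullid`` for absent parents; known
            # parents must resolve back to a revision number.
            if p != nullid:
                store.rev(p)
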
class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

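# Illustrative sketch: reading manifests through an ``imanifestlog``. ``mlog``
# is any conforming object and ``node`` a binary root-manifest node; treating
# the parsed manifest as an iterable mapping of paths follows the
# ``imanifestdict`` interface documented earlier in this module.
def _example_trackedpaths(mlog, node, tree=b''):
    # The empty tree selects the root manifest; a non-empty directory
    # requires tree manifests.
    mctx = mlog.get(tree, node)
    return sorted(mctx.read())
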
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """

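# Illustrative sketch: obtaining per-file storage from an object providing
# this sub-interface. The path is a placeholder; ``len()`` on the returned
# ``ifilestorage`` counts stored revisions per that interface's contract.
def _example_filerevcount(repo, path=b'README'):
    fl = repo.file(path)
    return len(fl)
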
class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = interfaceutil.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """)

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening.""")

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses.""")

    features = interfaceutil.Attribute(
        """Set of "features" this repository supports.

        A "feature" is a loosely-defined term. It can refer to a feature
        in the classical sense or can describe an implementation detail
        of the repository. For example, a ``readonly`` feature may denote
        the repository as read-only. Or a ``revlogfilestore`` feature may
        denote that the repository is using revlogs for file storage.

        The intent of features is to provide a machine-queryable mechanism
        for repo consumers to test for various repository characteristics.

        Features are similar to ``requirements``. The main difference is that
        requirements are stored on-disk and represent requirements to open the
        repository. Features are more run-time capabilities of the repository
        and more granular capabilities (which may be derived from requirements).
        """)

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo.""")

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory.""")

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """)

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """)

    root = interfaceutil.Attribute(
        """Path to the root of the working directory.""")

    path = interfaceutil.Attribute(
        """Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo.""")

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """)

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """)

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor.""")

    ui = interfaceutil.Attribute(
        """Main ui instance for this instance.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from.""")

    store = interfaceutil.Attribute(
        """A store instance.""")

    spath = interfaceutil.Attribute(
        """Path to the store.""")

    sjoin = interfaceutil.Attribute(
        """Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """)

    wcachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory dedicated to the
        working copy.

        Typically .hg/wcache.
        """)

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered.""")

    names = interfaceutil.Attribute(
        """A ``namespaces`` instance.""")

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute(
        """A store of obsolescence data.""")

    changelog = interfaceutil.Attribute(
        """A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """)

    dirstate = interfaceutil.Attribute(
        """Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec.""")

    def narrowmatch():
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def branchtip(branchtip, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, vdirs, match, status, fail):
        pass

    def commit(text='', user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(node1='.', node2=None, match=None, ignored=False,
               clean=False, unknown=False, listsubrepos=False):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        pass

    def postdsstatus():
        pass

    def clearpostdsstatus():
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = interfaceutil.Attribute(
        """util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass

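# Illustrative sketch combining several members of ``ilocalrepositorymain``:
# evaluate a revset, then record the result under the new working-copy cache
# VFS. The cache file name is an assumption for the example, as is calling
# the VFS to open a file (per Mercurial's vfs convention); nothing in core
# writes this particular file.
def _example_cachedraftcount(repo):
    drafts = len(list(repo.revs(b'draft()')))
    with repo.wcachevfs(b'example-draftcount', b'wb', atomictemp=True) as fp:
        fp.write(b'%d\n' % drafts)
    return drafts
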
class completelocalrepository(ilocalrepositorymain,
                              ilocalrepositoryfilestorage):
    """Complete interface for a local repository."""

class iwireprotocolcommandcacher(interfaceutil.Interface):
    """Represents a caching backend for wire protocol commands.

    Wire protocol version 2 supports transparent caching of many commands.
    To leverage this caching, servers can activate objects that cache
    command responses. Objects handle both cache writing and reading.
    This interface defines how that response caching mechanism works.

    Wire protocol version 2 commands emit a series of objects that are
    serialized and sent to the client. The caching layer exists between
    the invocation of the command function and the sending of its output
    objects to an output layer.

    Instances of this interface represent a binding to a cache that
    can serve a response (in place of calling a command function) and/or
    write responses to a cache for subsequent use.

    When a command request arrives, the following happens with regards
    to this interface:

    1. The server determines whether the command request is cacheable.
    2. If it is, an instance of this interface is spawned.
    3. The cacher is activated in a context manager (``__enter__`` is called).
    4. A cache *key* for that request is derived. This will call the
       instance's ``adjustcachekeystate()`` method so the derivation
       can be influenced.
    5. The cacher is informed of the derived cache key via a call to
       ``setcachekey()``.
    6. The cacher's ``lookup()`` method is called to test for presence of
       the derived key in the cache.
    7. If ``lookup()`` returns a hit, that cached result is used in place
       of invoking the command function. ``__exit__`` is called and the
       instance is discarded.
    8. The command function is invoked.
    9. ``onobject()`` is called for each object emitted by the command
       function.
    10. After the final object is seen, ``onfinished()`` is called.
    11. ``__exit__`` is called to signal the end of use of the instance.

    Cache *key* derivation can be influenced by the instance.

    Cache keys are initially derived by a deterministic representation of
    the command request. This includes the command name, arguments, protocol
    version, etc. This initial key derivation is performed by CBOR-encoding a
    data structure and feeding that output into a hasher.

    Instances of this interface can influence this initial key derivation
    via ``adjustcachekeystate()``.

    The instance is informed of the derived cache key via a call to
    ``setcachekey()``. The instance must store the key locally so it can
    be consulted on subsequent operations that may require it.

    When constructed, the instance has access to a callable that can be used
    for encoding response objects. This callable receives as its single
    argument an object emitted by a command function. It returns an iterable
    of bytes chunks representing the encoded object. Unless the cacher is
    caching native Python objects in memory or has a way of reconstructing
    the original Python objects, implementations typically call this function
    to produce bytes from the output objects and then store those bytes in
    the cache. When it comes time to re-emit those bytes, they are wrapped
    in a ``wireprototypes.encodedresponse`` instance to tell the output
    layer that they are pre-encoded.

    When receiving the objects emitted by the command function, instances
    can choose what to do with those objects. The simplest thing to do is
    re-emit the original objects. They will be forwarded to the output
    layer and will be processed as if the cacher did not exist.

    Implementations could also choose to not emit objects - instead locally
    buffering objects or their encoded representation. They could then emit
    a single "coalesced" object when ``onfinished()`` is called. In
    this way, the implementation would function as a filtering layer of
    sorts.

    When caching objects, typically the encoded form of the object will
    be stored. Keep in mind that if the original object is forwarded to
    the output layer, it will need to be encoded there as well. For large
    output, this redundant encoding could add overhead. Implementations
    could wrap the encoded object data in ``wireprototypes.encodedresponse``
    instances to avoid this overhead.
    """
    def __enter__():
        """Marks the instance as active.

        Should return self.
        """

    def __exit__(exctype, excvalue, exctb):
        """Called when cacher is no longer used.

        This can be used by implementations to perform cleanup actions (e.g.
        disconnecting network sockets, aborting a partially cached response).
        """

    def adjustcachekeystate(state):
        """Influences cache key derivation by adjusting state to derive key.

        A dict defining the state used to derive the cache key is passed.

        Implementations can modify this dict to record additional state that
        is wanted to influence key derivation.

        Implementations are *highly* encouraged to not modify or delete
        existing keys.
        """

    def setcachekey(key):
        """Record the derived cache key for this request.

        Instances may mutate the key for internal usage, as desired. e.g.
        instances may wish to prepend the repo name, introduce path
        components for filesystem or URL addressing, etc. Behavior is up to
        the cache.

        Returns a bool indicating if the request is cacheable by this
        instance.
        """

    def lookup():
        """Attempt to resolve an entry in the cache.

        The instance is instructed to look for the cache key that it was
        informed about via the call to ``setcachekey()``.

        If there's no cache hit or the cacher doesn't wish to use the cached
        entry, ``None`` should be returned.

        Else, a dict defining the cached result should be returned. The
        dict may have the following keys:

        objs
           An iterable of objects that should be sent to the client. That
           iterable of objects is expected to be what the command function
           would return if invoked or an equivalent representation thereof.
        """

    def onobject(obj):
        """Called when a new object is emitted from the command function.

        Receives as its argument the object that was emitted from the
        command function.

        This method returns an iterator of objects to forward to the output
        layer. The easiest implementation is a generator that just
        ``yield obj``.
        """

    def onfinished():
        """Called after all objects have been emitted from the command function.

        Implementations should return an iterator of objects to forward to
        the output layer.

        This method can be a generator.
        """