localrepo: create new function for instantiating a local repo object...
Gregory Szorc
changeset r39978:bfeab472 (default branch)
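The diff below adds a module-level factory, makelocalrepository(ui, path, intents=None), which wraps direct construction of localrepository and returns an object conforming to the repository.completelocalrepository interface. A minimal usage sketch, assuming a ui instance obtained via mercurial.ui.ui.load(); the repository path is illustrative and not part of this change:

    from mercurial import localrepo, ui as uimod

    # Load a ui populated from the user's configuration files.
    baseui = uimod.ui.load()
    # Ask the factory for a repository object instead of instantiating
    # localrepository directly; the path is a bytes path to an existing repo.
    repo = localrepo.makelocalrepository(baseui, b'/path/to/repo')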
@@ -1,2537 +1,2549 @@
1 # localrepo.py - read/write repository class for mercurial
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import errno
11 import hashlib
12 import os
13 import random
14 import sys
15 import time
16 import weakref
17
18 from .i18n import _
19 from .node import (
20 hex,
21 nullid,
22 short,
23 )
24 from . import (
25 bookmarks,
26 branchmap,
27 bundle2,
28 changegroup,
29 changelog,
30 color,
31 context,
32 dirstate,
33 dirstateguard,
34 discovery,
35 encoding,
36 error,
37 exchange,
38 extensions,
39 filelog,
40 hook,
41 lock as lockmod,
42 manifest,
43 match as matchmod,
44 merge as mergemod,
45 mergeutil,
46 namespaces,
47 narrowspec,
48 obsolete,
49 pathutil,
50 phases,
51 pushkey,
52 pycompat,
53 repository,
54 repoview,
55 revset,
56 revsetlang,
57 scmutil,
58 sparse,
59 store,
60 subrepoutil,
61 tags as tagsmod,
62 transaction,
63 txnutil,
64 util,
65 vfs as vfsmod,
66 )
67 from .utils import (
68 interfaceutil,
69 procutil,
70 stringutil,
71 )
72
73 from .revlogutils import (
74 constants as revlogconst,
75 )
76
77 release = lockmod.release
78 urlerr = util.urlerr
79 urlreq = util.urlreq
80
81 # set of (path, vfs-location) tuples. vfs-location is:
82 # - 'plain for vfs relative paths
83 # - '' for svfs relative paths
84 _cachedfiles = set()
85
86 class _basefilecache(scmutil.filecache):
87 """All filecache usage on repo are done for logic that should be unfiltered
88 """
89 def __get__(self, repo, type=None):
90 if repo is None:
91 return self
92 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
93 def __set__(self, repo, value):
94 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
95 def __delete__(self, repo):
96 return super(_basefilecache, self).__delete__(repo.unfiltered())
97
98 class repofilecache(_basefilecache):
99 """filecache for files in .hg but outside of .hg/store"""
100 def __init__(self, *paths):
101 super(repofilecache, self).__init__(*paths)
102 for path in paths:
103 _cachedfiles.add((path, 'plain'))
104
105 def join(self, obj, fname):
106 return obj.vfs.join(fname)
107
108 class storecache(_basefilecache):
109 """filecache for files in the store"""
110 def __init__(self, *paths):
111 super(storecache, self).__init__(*paths)
112 for path in paths:
113 _cachedfiles.add((path, ''))
114
115 def join(self, obj, fname):
116 return obj.sjoin(fname)
117
118 def isfilecached(repo, name):
119 """check if a repo has already cached "name" filecache-ed property
120
121 This returns (cachedobj-or-None, iscached) tuple.
122 """
123 cacheentry = repo.unfiltered()._filecache.get(name, None)
124 if not cacheentry:
125 return None, False
126 return cacheentry.obj, True
127
128 class unfilteredpropertycache(util.propertycache):
129 """propertycache that apply to unfiltered repo only"""
130
131 def __get__(self, repo, type=None):
132 unfi = repo.unfiltered()
133 if unfi is repo:
134 return super(unfilteredpropertycache, self).__get__(unfi)
135 return getattr(unfi, self.name)
136
137 class filteredpropertycache(util.propertycache):
138 """propertycache that must take filtering in account"""
139
140 def cachevalue(self, obj, value):
141 object.__setattr__(obj, self.name, value)
142
143
144 def hasunfilteredcache(repo, name):
145 """check if a repo has an unfilteredpropertycache value for <name>"""
146 return name in vars(repo.unfiltered())
147
148 def unfilteredmethod(orig):
149 """decorate method that always need to be run on unfiltered version"""
150 def wrapper(repo, *args, **kwargs):
151 return orig(repo.unfiltered(), *args, **kwargs)
152 return wrapper
153
154 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
155 'unbundle'}
156 legacycaps = moderncaps.union({'changegroupsubset'})
157
158 @interfaceutil.implementer(repository.ipeercommandexecutor)
159 class localcommandexecutor(object):
160 def __init__(self, peer):
161 self._peer = peer
162 self._sent = False
163 self._closed = False
164
165 def __enter__(self):
166 return self
167
168 def __exit__(self, exctype, excvalue, exctb):
169 self.close()
170
171 def callcommand(self, command, args):
172 if self._sent:
173 raise error.ProgrammingError('callcommand() cannot be used after '
174 'sendcommands()')
175
176 if self._closed:
177 raise error.ProgrammingError('callcommand() cannot be used after '
178 'close()')
179
180 # We don't need to support anything fancy. Just call the named
181 # method on the peer and return a resolved future.
182 fn = getattr(self._peer, pycompat.sysstr(command))
183
184 f = pycompat.futures.Future()
185
186 try:
187 result = fn(**pycompat.strkwargs(args))
188 except Exception:
189 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
190 else:
191 f.set_result(result)
192
193 return f
194
195 def sendcommands(self):
196 self._sent = True
197
198 def close(self):
199 self._closed = True
200
201 @interfaceutil.implementer(repository.ipeercommands)
202 class localpeer(repository.peer):
203 '''peer for a local repo; reflects only the most recent API'''
204
205 def __init__(self, repo, caps=None):
206 super(localpeer, self).__init__()
207
208 if caps is None:
209 caps = moderncaps.copy()
210 self._repo = repo.filtered('served')
211 self.ui = repo.ui
212 self._caps = repo._restrictcapabilities(caps)
213
214 # Begin of _basepeer interface.
215
216 def url(self):
217 return self._repo.url()
218
219 def local(self):
220 return self._repo
221
222 def peer(self):
223 return self
224
225 def canpush(self):
226 return True
227
228 def close(self):
229 self._repo.close()
230
231 # End of _basepeer interface.
232
233 # Begin of _basewirecommands interface.
234
235 def branchmap(self):
236 return self._repo.branchmap()
237
238 def capabilities(self):
239 return self._caps
240
241 def clonebundles(self):
242 return self._repo.tryread('clonebundles.manifest')
243
244 def debugwireargs(self, one, two, three=None, four=None, five=None):
245 """Used to test argument passing over the wire"""
246 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
247 pycompat.bytestr(four),
248 pycompat.bytestr(five))
249
250 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
251 **kwargs):
252 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
253 common=common, bundlecaps=bundlecaps,
254 **kwargs)[1]
255 cb = util.chunkbuffer(chunks)
256
257 if exchange.bundle2requested(bundlecaps):
258 # When requesting a bundle2, getbundle returns a stream to make the
259 # wire level function happier. We need to build a proper object
260 # from it in local peer.
261 return bundle2.getunbundler(self.ui, cb)
262 else:
263 return changegroup.getunbundler('01', cb, None)
264
265 def heads(self):
266 return self._repo.heads()
267
268 def known(self, nodes):
269 return self._repo.known(nodes)
270
271 def listkeys(self, namespace):
272 return self._repo.listkeys(namespace)
273
274 def lookup(self, key):
275 return self._repo.lookup(key)
276
277 def pushkey(self, namespace, key, old, new):
278 return self._repo.pushkey(namespace, key, old, new)
279
280 def stream_out(self):
281 raise error.Abort(_('cannot perform stream clone against local '
282 'peer'))
283
284 def unbundle(self, bundle, heads, url):
285 """apply a bundle on a repo
286
287 This function handles the repo locking itself."""
288 try:
289 try:
290 bundle = exchange.readbundle(self.ui, bundle, None)
291 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
292 if util.safehasattr(ret, 'getchunks'):
293 # This is a bundle20 object, turn it into an unbundler.
294 # This little dance should be dropped eventually when the
295 # API is finally improved.
296 stream = util.chunkbuffer(ret.getchunks())
297 ret = bundle2.getunbundler(self.ui, stream)
298 return ret
299 except Exception as exc:
300 # If the exception contains output salvaged from a bundle2
301 # reply, we need to make sure it is printed before continuing
302 # to fail. So we build a bundle2 with such output and consume
303 # it directly.
304 #
305 # This is not very elegant but allows a "simple" solution for
306 # issue4594
307 output = getattr(exc, '_bundle2salvagedoutput', ())
308 if output:
309 bundler = bundle2.bundle20(self._repo.ui)
310 for out in output:
311 bundler.addpart(out)
312 stream = util.chunkbuffer(bundler.getchunks())
313 b = bundle2.getunbundler(self.ui, stream)
314 bundle2.processbundle(self._repo, b)
315 raise
316 except error.PushRaced as exc:
317 raise error.ResponseError(_('push failed:'),
318 stringutil.forcebytestr(exc))
319
320 # End of _basewirecommands interface.
321
322 # Begin of peer interface.
323
324 def commandexecutor(self):
325 return localcommandexecutor(self)
326
327 # End of peer interface.
328
329 @interfaceutil.implementer(repository.ipeerlegacycommands)
330 class locallegacypeer(localpeer):
331 '''peer extension which implements legacy methods too; used for tests with
332 restricted capabilities'''
333
334 def __init__(self, repo):
335 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
336
337 # Begin of baselegacywirecommands interface.
338
339 def between(self, pairs):
340 return self._repo.between(pairs)
341
342 def branches(self, nodes):
343 return self._repo.branches(nodes)
344
345 def changegroup(self, nodes, source):
346 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
347 missingheads=self._repo.heads())
348 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349
350 def changegroupsubset(self, bases, heads, source):
351 outgoing = discovery.outgoing(self._repo, missingroots=bases,
352 missingheads=heads)
353 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
354
355 # End of baselegacywirecommands interface.
356
357 # Increment the sub-version when the revlog v2 format changes to lock out old
358 # clients.
359 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
360
361 # A repository with the sparserevlog feature will have delta chains that
362 # can spread over a larger span. Sparse reading cuts these large spans into
363 # pieces, so that each piece isn't too big.
364 # Without the sparserevlog capability, reading from the repository could use
365 # huge amounts of memory, because the whole span would be read at once,
366 # including all the intermediate revisions that aren't pertinent for the chain.
367 # This is why once a repository has enabled sparse-read, it becomes required.
368 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
369
370 # Functions receiving (ui, features) that extensions can register to impact
371 # the ability to load repositories with custom requirements. Only
372 # functions defined in loaded extensions are called.
373 #
374 # The function receives a set of requirement strings that the repository
375 # is capable of opening. Functions will typically add elements to the
376 # set to reflect that the extension knows how to handle that requirements.
377 featuresetupfuncs = set()
378
379 def makelocalrepository(ui, path, intents=None):
380 """Create a local repository object.
381
382 Given arguments needed to construct a local repository, this function
383 derives a type suitable for representing that repository and returns an
384 instance of it.
385
386 The returned object conforms to the ``repository.completelocalrepository``
387 interface.
388 """
389 return localrepository(ui, path, intents=intents)
390
391 @interfaceutil.implementer(repository.completelocalrepository)
392 class localrepository(object):
393
394 # obsolete experimental requirements:
395 # - manifestv2: An experimental new manifest format that allowed
396 # for stem compression of long paths. Experiment ended up not
397 # being successful (repository sizes went up due to worse delta
398 # chains), and the code was deleted in 4.6.
399 supportedformats = {
400 'revlogv1',
401 'generaldelta',
402 'treemanifest',
403 REVLOGV2_REQUIREMENT,
404 SPARSEREVLOG_REQUIREMENT,
405 }
406 _basesupported = supportedformats | {
407 'store',
408 'fncache',
409 'shared',
410 'relshared',
411 'dotencode',
412 'exp-sparse',
413 'internal-phase'
414 }
415 openerreqs = {
416 'revlogv1',
417 'generaldelta',
418 'treemanifest',
419 }
420
421 # list of prefix for file which can be written without 'wlock'
422 # Extensions should extend this list when needed
423 _wlockfreeprefix = {
424 # We migh consider requiring 'wlock' for the next
425 # two, but pretty much all the existing code assume
426 # wlock is not needed so we keep them excluded for
427 # now.
428 'hgrc',
429 'requires',
430 # XXX cache is a complicatged business someone
431 # should investigate this in depth at some point
432 'cache/',
433 # XXX shouldn't be dirstate covered by the wlock?
434 'dirstate',
435 # XXX bisect was still a bit too messy at the time
436 # this changeset was introduced. Someone should fix
437 # the remainig bit and drop this line
438 'bisect.state',
439 }
440
441 def __init__(self, baseui, path, intents=None):
442 """Create a new local repository instance.
443
444 Most callers should use ``hg.repository()`` or ``localrepo.instance()``
445 for obtaining a new repository object.
446 """
447
448 self.requirements = set()
449 self.filtername = None
450 # wvfs: rooted at the repository root, used to access the working copy
451 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
452 # vfs: rooted at .hg, used to access repo files outside of .hg/store
453 self.vfs = None
454 # svfs: usually rooted at .hg/store, used to access repository history
455 # If this is a shared repository, this vfs may point to another
456 # repository's .hg/store directory.
457 self.svfs = None
458 self.root = self.wvfs.base
459 self.path = self.wvfs.join(".hg")
460 self.origroot = path
461 self.baseui = baseui
462 self.ui = baseui.copy()
463 self.ui.copy = baseui.copy # prevent copying repo configuration
464 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
465 if (self.ui.configbool('devel', 'all-warnings') or
466 self.ui.configbool('devel', 'check-locks')):
467 self.vfs.audit = self._getvfsward(self.vfs.audit)
468 # A list of callback to shape the phase if no data were found.
469 # Callback are in the form: func(repo, roots) --> processed root.
470 # This list it to be filled by extension during repo setup
471 self._phasedefaults = []
472 try:
473 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
474 self._loadextensions()
475 except IOError:
476 pass
477
478 if featuresetupfuncs:
479 self.supported = set(self._basesupported) # use private copy
480 extmods = set(m.__name__ for n, m
481 in extensions.extensions(self.ui))
482 for setupfunc in featuresetupfuncs:
483 if setupfunc.__module__ in extmods:
484 setupfunc(self.ui, self.supported)
485 else:
486 self.supported = self._basesupported
487 color.setup(self.ui)
488
489 # Add compression engines.
490 for name in util.compengines:
491 engine = util.compengines[name]
492 if engine.revlogheader():
493 self.supported.add('exp-compression-%s' % name)
494
495 if not self.vfs.isdir():
496 try:
497 self.vfs.stat()
498 except OSError as inst:
499 if inst.errno != errno.ENOENT:
500 raise
501 raise error.RepoError(_("repository %s not found") % path)
502 else:
503 try:
504 self.requirements = scmutil.readrequires(
505 self.vfs, self.supported)
506 except IOError as inst:
507 if inst.errno != errno.ENOENT:
508 raise
509
510 cachepath = self.vfs.join('cache')
511 self.sharedpath = self.path
512 try:
513 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
514 if 'relshared' in self.requirements:
515 sharedpath = self.vfs.join(sharedpath)
516 vfs = vfsmod.vfs(sharedpath, realpath=True)
517 cachepath = vfs.join('cache')
518 s = vfs.base
519 if not vfs.exists():
520 raise error.RepoError(
521 _('.hg/sharedpath points to nonexistent directory %s') % s)
522 self.sharedpath = s
523 except IOError as inst:
524 if inst.errno != errno.ENOENT:
525 raise
526
527 if 'exp-sparse' in self.requirements and not sparse.enabled:
528 raise error.RepoError(_('repository is using sparse feature but '
529 'sparse is not enabled; enable the '
530 '"sparse" extensions to access'))
531
532 self.store = store.store(
533 self.requirements, self.sharedpath,
534 lambda base: vfsmod.vfs(base, cacheaudited=True))
535 self.spath = self.store.path
536 self.svfs = self.store.vfs
537 self.sjoin = self.store.join
538 self.vfs.createmode = self.store.createmode
539 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
540 self.cachevfs.createmode = self.store.createmode
541 if (self.ui.configbool('devel', 'all-warnings') or
542 self.ui.configbool('devel', 'check-locks')):
543 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
544 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
545 else: # standard vfs
546 self.svfs.audit = self._getsvfsward(self.svfs.audit)
547 self._applyopenerreqs()
548
549 self._dirstatevalidatewarned = False
550
551 self._branchcaches = {}
552 self._revbranchcache = None
553 self._filterpats = {}
554 self._datafilters = {}
555 self._transref = self._lockref = self._wlockref = None
556
557 # A cache for various files under .hg/ that tracks file changes,
558 # (used by the filecache decorator)
559 #
560 # Maps a property name to its util.filecacheentry
561 self._filecache = {}
562
563 # hold sets of revision to be filtered
564 # should be cleared when something might have changed the filter value:
565 # - new changesets,
566 # - phase change,
567 # - new obsolescence marker,
568 # - working directory parent change,
569 # - bookmark changes
570 self.filteredrevcache = {}
571
572 # post-dirstate-status hooks
573 self._postdsstatus = []
574
575 # generic mapping between names and nodes
576 self.names = namespaces.namespaces()
577
578 # Key to signature value.
579 self._sparsesignaturecache = {}
580 # Signature to cached matcher instance.
581 self._sparsematchercache = {}
582
583 def _getvfsward(self, origfunc):
584 """build a ward for self.vfs"""
585 rref = weakref.ref(self)
586 def checkvfs(path, mode=None):
587 ret = origfunc(path, mode=mode)
588 repo = rref()
589 if (repo is None
590 or not util.safehasattr(repo, '_wlockref')
591 or not util.safehasattr(repo, '_lockref')):
592 return
593 if mode in (None, 'r', 'rb'):
594 return
595 if path.startswith(repo.path):
596 # truncate name relative to the repository (.hg)
597 path = path[len(repo.path) + 1:]
598 if path.startswith('cache/'):
599 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
600 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
601 if path.startswith('journal.'):
602 # journal is covered by 'lock'
603 if repo._currentlock(repo._lockref) is None:
604 repo.ui.develwarn('write with no lock: "%s"' % path,
605 stacklevel=2, config='check-locks')
606 elif repo._currentlock(repo._wlockref) is None:
607 # rest of vfs files are covered by 'wlock'
608 #
609 # exclude special files
610 for prefix in self._wlockfreeprefix:
611 if path.startswith(prefix):
612 return
613 repo.ui.develwarn('write with no wlock: "%s"' % path,
614 stacklevel=2, config='check-locks')
615 return ret
616 return checkvfs
617
618 def _getsvfsward(self, origfunc):
619 """build a ward for self.svfs"""
620 rref = weakref.ref(self)
621 def checksvfs(path, mode=None):
622 ret = origfunc(path, mode=mode)
623 repo = rref()
624 if repo is None or not util.safehasattr(repo, '_lockref'):
625 return
626 if mode in (None, 'r', 'rb'):
627 return
628 if path.startswith(repo.sharedpath):
629 # truncate name relative to the repository (.hg)
630 path = path[len(repo.sharedpath) + 1:]
631 if repo._currentlock(repo._lockref) is None:
632 repo.ui.develwarn('write with no lock: "%s"' % path,
633 stacklevel=3)
634 return ret
635 return checksvfs
636
637 def close(self):
638 self._writecaches()
639
640 def _loadextensions(self):
641 extensions.loadall(self.ui)
642
643 def _writecaches(self):
644 if self._revbranchcache:
645 self._revbranchcache.write()
646
647 def _restrictcapabilities(self, caps):
648 if self.ui.configbool('experimental', 'bundle2-advertise'):
649 caps = set(caps)
650 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
651 role='client'))
652 caps.add('bundle2=' + urlreq.quote(capsblob))
653 return caps
654
655 def _applyopenerreqs(self):
656 self.svfs.options = dict((r, 1) for r in self.requirements
657 if r in self.openerreqs)
658 # experimental config: format.chunkcachesize
659 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
660 if chunkcachesize is not None:
661 self.svfs.options['chunkcachesize'] = chunkcachesize
662 # experimental config: format.manifestcachesize
663 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
664 if manifestcachesize is not None:
665 self.svfs.options['manifestcachesize'] = manifestcachesize
666 deltabothparents = self.ui.configbool('storage',
667 'revlog.optimize-delta-parent-choice')
668 self.svfs.options['deltabothparents'] = deltabothparents
669 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
670 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
671 if 0 <= chainspan:
672 self.svfs.options['maxdeltachainspan'] = chainspan
673 mmapindexthreshold = self.ui.configbytes('experimental',
674 'mmapindexthreshold')
675 if mmapindexthreshold is not None:
676 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
677 withsparseread = self.ui.configbool('experimental', 'sparse-read')
678 srdensitythres = float(self.ui.config('experimental',
679 'sparse-read.density-threshold'))
680 srmingapsize = self.ui.configbytes('experimental',
681 'sparse-read.min-gap-size')
682 self.svfs.options['with-sparse-read'] = withsparseread
683 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
684 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
685 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
686 self.svfs.options['sparse-revlog'] = sparserevlog
687 if sparserevlog:
688 self.svfs.options['generaldelta'] = True
689 maxchainlen = None
690 if sparserevlog:
691 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
692 # experimental config: format.maxchainlen
693 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
694 if maxchainlen is not None:
695 self.svfs.options['maxchainlen'] = maxchainlen
696
697 for r in self.requirements:
698 if r.startswith('exp-compression-'):
699 self.svfs.options['compengine'] = r[len('exp-compression-'):]
700
701 # TODO move "revlogv2" to openerreqs once finalized.
702 if REVLOGV2_REQUIREMENT in self.requirements:
703 self.svfs.options['revlogv2'] = True
704
705 def _writerequirements(self):
706 scmutil.writerequires(self.vfs, self.requirements)
707
708 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
709 # self -> auditor -> self._checknested -> self
710
711 @property
712 def auditor(self):
713 # This is only used by context.workingctx.match in order to
714 # detect files in subrepos.
715 return pathutil.pathauditor(self.root, callback=self._checknested)
716
717 @property
718 def nofsauditor(self):
719 # This is only used by context.basectx.match in order to detect
720 # files in subrepos.
721 return pathutil.pathauditor(self.root, callback=self._checknested,
722 realfs=False, cached=True)
723
724 def _checknested(self, path):
725 """Determine if path is a legal nested repository."""
726 if not path.startswith(self.root):
727 return False
728 subpath = path[len(self.root) + 1:]
729 normsubpath = util.pconvert(subpath)
730
731 # XXX: Checking against the current working copy is wrong in
732 # the sense that it can reject things like
733 #
734 # $ hg cat -r 10 sub/x.txt
735 #
736 # if sub/ is no longer a subrepository in the working copy
737 # parent revision.
738 #
739 # However, it can of course also allow things that would have
740 # been rejected before, such as the above cat command if sub/
741 # is a subrepository now, but was a normal directory before.
742 # The old path auditor would have rejected by mistake since it
743 # panics when it sees sub/.hg/.
744 #
745 # All in all, checking against the working copy seems sensible
746 # since we want to prevent access to nested repositories on
747 # the filesystem *now*.
748 ctx = self[None]
749 parts = util.splitpath(subpath)
750 while parts:
751 prefix = '/'.join(parts)
752 if prefix in ctx.substate:
753 if prefix == normsubpath:
754 return True
755 else:
756 sub = ctx.sub(prefix)
757 return sub.checknested(subpath[len(prefix) + 1:])
758 else:
759 parts.pop()
760 return False
761
762 def peer(self):
763 return localpeer(self) # not cached to avoid reference cycle
764
765 def unfiltered(self):
766 """Return unfiltered version of the repository
767
768 Intended to be overwritten by filtered repo."""
769 return self
770
771 def filtered(self, name, visibilityexceptions=None):
772 """Return a filtered version of a repository"""
773 cls = repoview.newtype(self.unfiltered().__class__)
774 return cls(self, name, visibilityexceptions)
775
776 @repofilecache('bookmarks', 'bookmarks.current')
777 def _bookmarks(self):
778 return bookmarks.bmstore(self)
779
780 @property
781 def _activebookmark(self):
782 return self._bookmarks.active
783
784 # _phasesets depend on changelog. what we need is to call
785 # _phasecache.invalidate() if '00changelog.i' was changed, but it
786 # can't be easily expressed in filecache mechanism.
787 @storecache('phaseroots', '00changelog.i')
788 def _phasecache(self):
789 return phases.phasecache(self, self._phasedefaults)
790
791 @storecache('obsstore')
792 def obsstore(self):
793 return obsolete.makestore(self.ui, self)
794
795 @storecache('00changelog.i')
796 def changelog(self):
797 return changelog.changelog(self.svfs,
798 trypending=txnutil.mayhavepending(self.root))
799
800 def _constructmanifest(self):
801 # This is a temporary function while we migrate from manifest to
802 # manifestlog. It allows bundlerepo and unionrepo to intercept the
803 # manifest creation.
804 return manifest.manifestrevlog(self.svfs)
805
806 @storecache('00manifest.i')
807 def manifestlog(self):
808 return manifest.manifestlog(self.svfs, self)
809
810 @repofilecache('dirstate')
811 def dirstate(self):
812 return self._makedirstate()
813
814 def _makedirstate(self):
815 """Extension point for wrapping the dirstate per-repo."""
816 sparsematchfn = lambda: sparse.matcher(self)
817
818 return dirstate.dirstate(self.vfs, self.ui, self.root,
819 self._dirstatevalidate, sparsematchfn)
820
821 def _dirstatevalidate(self, node):
822 try:
823 self.changelog.rev(node)
824 return node
825 except error.LookupError:
826 if not self._dirstatevalidatewarned:
827 self._dirstatevalidatewarned = True
828 self.ui.warn(_("warning: ignoring unknown"
829 " working parent %s!\n") % short(node))
830 return nullid
831
832 @storecache(narrowspec.FILENAME)
833 def narrowpats(self):
834 """matcher patterns for this repository's narrowspec
835
836 A tuple of (includes, excludes).
837 """
838 source = self
839 if self.shared():
840 from . import hg
829 source = hg.sharedreposource(self)
841 source = hg.sharedreposource(self)
830 return narrowspec.load(source)
842 return narrowspec.load(source)
831
843
832 @storecache(narrowspec.FILENAME)
844 @storecache(narrowspec.FILENAME)
833 def _narrowmatch(self):
845 def _narrowmatch(self):
834 if repository.NARROW_REQUIREMENT not in self.requirements:
846 if repository.NARROW_REQUIREMENT not in self.requirements:
835 return matchmod.always(self.root, '')
847 return matchmod.always(self.root, '')
836 include, exclude = self.narrowpats
848 include, exclude = self.narrowpats
837 return narrowspec.match(self.root, include=include, exclude=exclude)
849 return narrowspec.match(self.root, include=include, exclude=exclude)
838
850
839 # TODO(martinvonz): make this property-like instead?
851 # TODO(martinvonz): make this property-like instead?
840 def narrowmatch(self):
852 def narrowmatch(self):
841 return self._narrowmatch
853 return self._narrowmatch
842
854
843 def setnarrowpats(self, newincludes, newexcludes):
855 def setnarrowpats(self, newincludes, newexcludes):
844 narrowspec.save(self, newincludes, newexcludes)
856 narrowspec.save(self, newincludes, newexcludes)
845 self.invalidate(clearfilecache=True)
857 self.invalidate(clearfilecache=True)
846
858
847 def __getitem__(self, changeid):
859 def __getitem__(self, changeid):
848 if changeid is None:
860 if changeid is None:
849 return context.workingctx(self)
861 return context.workingctx(self)
850 if isinstance(changeid, context.basectx):
862 if isinstance(changeid, context.basectx):
851 return changeid
863 return changeid
852 if isinstance(changeid, slice):
864 if isinstance(changeid, slice):
853 # wdirrev isn't contiguous so the slice shouldn't include it
865 # wdirrev isn't contiguous so the slice shouldn't include it
854 return [context.changectx(self, i)
866 return [context.changectx(self, i)
855 for i in pycompat.xrange(*changeid.indices(len(self)))
867 for i in pycompat.xrange(*changeid.indices(len(self)))
856 if i not in self.changelog.filteredrevs]
868 if i not in self.changelog.filteredrevs]
857 try:
869 try:
858 return context.changectx(self, changeid)
870 return context.changectx(self, changeid)
859 except error.WdirUnsupported:
871 except error.WdirUnsupported:
860 return context.workingctx(self)
872 return context.workingctx(self)
861
873
862 def __contains__(self, changeid):
874 def __contains__(self, changeid):
863 """True if the given changeid exists
875 """True if the given changeid exists
864
876
865 error.AmbiguousPrefixLookupError is raised if an ambiguous node
877 error.AmbiguousPrefixLookupError is raised if an ambiguous node
866 specified.
878 specified.
867 """
879 """
868 try:
880 try:
869 self[changeid]
881 self[changeid]
870 return True
882 return True
871 except error.RepoLookupError:
883 except error.RepoLookupError:
872 return False
884 return False
873
885
874 def __nonzero__(self):
886 def __nonzero__(self):
875 return True
887 return True
876
888
877 __bool__ = __nonzero__
889 __bool__ = __nonzero__
878
890
879 def __len__(self):
891 def __len__(self):
880 # no need to pay the cost of repoview.changelog
892 # no need to pay the cost of repoview.changelog
881 unfi = self.unfiltered()
893 unfi = self.unfiltered()
882 return len(unfi.changelog)
894 return len(unfi.changelog)
883
895
884 def __iter__(self):
896 def __iter__(self):
885 return iter(self.changelog)
897 return iter(self.changelog)
886
898
887 def revs(self, expr, *args):
899 def revs(self, expr, *args):
888 '''Find revisions matching a revset.
900 '''Find revisions matching a revset.
889
901
890 The revset is specified as a string ``expr`` that may contain
902 The revset is specified as a string ``expr`` that may contain
891 %-formatting to escape certain types. See ``revsetlang.formatspec``.
903 %-formatting to escape certain types. See ``revsetlang.formatspec``.
892
904
893 Revset aliases from the configuration are not expanded. To expand
905 Revset aliases from the configuration are not expanded. To expand
894 user aliases, consider calling ``scmutil.revrange()`` or
906 user aliases, consider calling ``scmutil.revrange()`` or
895 ``repo.anyrevs([expr], user=True)``.
907 ``repo.anyrevs([expr], user=True)``.
896
908
897 Returns a revset.abstractsmartset, which is a list-like interface
909 Returns a revset.abstractsmartset, which is a list-like interface
898 that contains integer revisions.
910 that contains integer revisions.
899 '''
911 '''
900 expr = revsetlang.formatspec(expr, *args)
912 expr = revsetlang.formatspec(expr, *args)
901 m = revset.match(None, expr)
913 m = revset.match(None, expr)
902 return m(self)
914 return m(self)
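# Illustrative sketch (editor's addition): calling revs() with %-formatting,
# assuming 'repo' is a localrepository instance. The returned smartset is
# lazy and supports iteration and membership tests.
#
#     revs = repo.revs('ancestors(%d) and not public()', 42)
#     if 7 in revs:
#         for r in revs:
#             repo.ui.write('%d\n' % r)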
903
915
904 def set(self, expr, *args):
916 def set(self, expr, *args):
905 '''Find revisions matching a revset and emit changectx instances.
917 '''Find revisions matching a revset and emit changectx instances.
906
918
907 This is a convenience wrapper around ``revs()`` that iterates the
919 This is a convenience wrapper around ``revs()`` that iterates the
908 result and is a generator of changectx instances.
920 result and is a generator of changectx instances.
909
921
910 Revset aliases from the configuration are not expanded. To expand
922 Revset aliases from the configuration are not expanded. To expand
911 user aliases, consider calling ``scmutil.revrange()``.
923 user aliases, consider calling ``scmutil.revrange()``.
912 '''
924 '''
913 for r in self.revs(expr, *args):
925 for r in self.revs(expr, *args):
914 yield self[r]
926 yield self[r]
915
927
916 def anyrevs(self, specs, user=False, localalias=None):
928 def anyrevs(self, specs, user=False, localalias=None):
917 '''Find revisions matching one of the given revsets.
929 '''Find revisions matching one of the given revsets.
918
930
919 Revset aliases from the configuration are not expanded by default. To
931 Revset aliases from the configuration are not expanded by default. To
920 expand user aliases, specify ``user=True``. To provide some local
932 expand user aliases, specify ``user=True``. To provide some local
921 definitions overriding user aliases, set ``localalias`` to
933 definitions overriding user aliases, set ``localalias`` to
922 ``{name: definitionstring}``.
934 ``{name: definitionstring}``.
923 '''
935 '''
924 if user:
936 if user:
925 m = revset.matchany(self.ui, specs,
937 m = revset.matchany(self.ui, specs,
926 lookup=revset.lookupfn(self),
938 lookup=revset.lookupfn(self),
927 localalias=localalias)
939 localalias=localalias)
928 else:
940 else:
929 m = revset.matchany(None, specs, localalias=localalias)
941 m = revset.matchany(None, specs, localalias=localalias)
930 return m(self)
942 return m(self)
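# Illustrative sketch (editor's addition): anyrevs() with a local alias that
# overrides any user-defined alias of the same name; the alias name
# 'release_heads' is hypothetical.
#
#     revs = repo.anyrevs(['release_heads and draft()'], user=True,
#                         localalias={'release_heads': 'heads(public())'})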
931
943
932 def url(self):
944 def url(self):
933 return 'file:' + self.root
945 return 'file:' + self.root
934
946
935 def hook(self, name, throw=False, **args):
947 def hook(self, name, throw=False, **args):
936 """Call a hook, passing this repo instance.
948 """Call a hook, passing this repo instance.
937
949
938 This is a convenience method to aid invoking hooks. Extensions likely
950 This is a convenience method to aid invoking hooks. Extensions likely
939 won't call this unless they have registered a custom hook or are
951 won't call this unless they have registered a custom hook or are
940 replacing code that is expected to call a hook.
952 replacing code that is expected to call a hook.
941 """
953 """
942 return hook.hook(self.ui, self, name, throw, **args)
954 return hook.hook(self.ui, self, name, throw, **args)
943
955
944 @filteredpropertycache
956 @filteredpropertycache
945 def _tagscache(self):
957 def _tagscache(self):
946 '''Returns a tagscache object that contains various tags related
958 '''Returns a tagscache object that contains various tags related
947 caches.'''
959 caches.'''
948
960
949 # This simplifies its cache management by having one decorated
961 # This simplifies its cache management by having one decorated
950 # function (this one) and the rest simply fetch things from it.
962 # function (this one) and the rest simply fetch things from it.
951 class tagscache(object):
963 class tagscache(object):
952 def __init__(self):
964 def __init__(self):
953 # These two define the set of tags for this repository. tags
965 # These two define the set of tags for this repository. tags
954 # maps tag name to node; tagtypes maps tag name to 'global' or
966 # maps tag name to node; tagtypes maps tag name to 'global' or
955 # 'local'. (Global tags are defined by .hgtags across all
967 # 'local'. (Global tags are defined by .hgtags across all
956 # heads, and local tags are defined in .hg/localtags.)
968 # heads, and local tags are defined in .hg/localtags.)
957 # They constitute the in-memory cache of tags.
969 # They constitute the in-memory cache of tags.
958 self.tags = self.tagtypes = None
970 self.tags = self.tagtypes = None
959
971
960 self.nodetagscache = self.tagslist = None
972 self.nodetagscache = self.tagslist = None
961
973
962 cache = tagscache()
974 cache = tagscache()
963 cache.tags, cache.tagtypes = self._findtags()
975 cache.tags, cache.tagtypes = self._findtags()
964
976
965 return cache
977 return cache
966
978
967 def tags(self):
979 def tags(self):
968 '''return a mapping of tag to node'''
980 '''return a mapping of tag to node'''
969 t = {}
981 t = {}
970 if self.changelog.filteredrevs:
982 if self.changelog.filteredrevs:
971 tags, tt = self._findtags()
983 tags, tt = self._findtags()
972 else:
984 else:
973 tags = self._tagscache.tags
985 tags = self._tagscache.tags
974 for k, v in tags.iteritems():
986 for k, v in tags.iteritems():
975 try:
987 try:
976 # ignore tags to unknown nodes
988 # ignore tags to unknown nodes
977 self.changelog.rev(v)
989 self.changelog.rev(v)
978 t[k] = v
990 t[k] = v
979 except (error.LookupError, ValueError):
991 except (error.LookupError, ValueError):
980 pass
992 pass
981 return t
993 return t
982
994
983 def _findtags(self):
995 def _findtags(self):
984 '''Do the hard work of finding tags. Return a pair of dicts
996 '''Do the hard work of finding tags. Return a pair of dicts
985 (tags, tagtypes) where tags maps tag name to node, and tagtypes
997 (tags, tagtypes) where tags maps tag name to node, and tagtypes
986 maps tag name to a string like \'global\' or \'local\'.
998 maps tag name to a string like \'global\' or \'local\'.
987 Subclasses or extensions are free to add their own tags, but
999 Subclasses or extensions are free to add their own tags, but
988 should be aware that the returned dicts will be retained for the
1000 should be aware that the returned dicts will be retained for the
989 duration of the localrepo object.'''
1001 duration of the localrepo object.'''
990
1002
991 # XXX what tagtype should subclasses/extensions use? Currently
1003 # XXX what tagtype should subclasses/extensions use? Currently
992 # mq and bookmarks add tags, but do not set the tagtype at all.
1004 # mq and bookmarks add tags, but do not set the tagtype at all.
993 # Should each extension invent its own tag type? Should there
1005 # Should each extension invent its own tag type? Should there
994 # be one tagtype for all such "virtual" tags? Or is the status
1006 # be one tagtype for all such "virtual" tags? Or is the status
995 # quo fine?
1007 # quo fine?
996
1008
997
1009
998 # map tag name to (node, hist)
1010 # map tag name to (node, hist)
999 alltags = tagsmod.findglobaltags(self.ui, self)
1011 alltags = tagsmod.findglobaltags(self.ui, self)
1000 # map tag name to tag type
1012 # map tag name to tag type
1001 tagtypes = dict((tag, 'global') for tag in alltags)
1013 tagtypes = dict((tag, 'global') for tag in alltags)
1002
1014
1003 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1015 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1004
1016
1005 # Build the return dicts. Have to re-encode tag names because
1017 # Build the return dicts. Have to re-encode tag names because
1006 # the tags module always uses UTF-8 (in order not to lose info
1018 # the tags module always uses UTF-8 (in order not to lose info
1007 # writing to the cache), but the rest of Mercurial wants them in
1019 # writing to the cache), but the rest of Mercurial wants them in
1008 # local encoding.
1020 # local encoding.
1009 tags = {}
1021 tags = {}
1010 for (name, (node, hist)) in alltags.iteritems():
1022 for (name, (node, hist)) in alltags.iteritems():
1011 if node != nullid:
1023 if node != nullid:
1012 tags[encoding.tolocal(name)] = node
1024 tags[encoding.tolocal(name)] = node
1013 tags['tip'] = self.changelog.tip()
1025 tags['tip'] = self.changelog.tip()
1014 tagtypes = dict([(encoding.tolocal(name), value)
1026 tagtypes = dict([(encoding.tolocal(name), value)
1015 for (name, value) in tagtypes.iteritems()])
1027 for (name, value) in tagtypes.iteritems()])
1016 return (tags, tagtypes)
1028 return (tags, tagtypes)
1017
1029
1018 def tagtype(self, tagname):
1030 def tagtype(self, tagname):
1019 '''
1031 '''
1020 return the type of the given tag. result can be:
1032 return the type of the given tag. result can be:
1021
1033
1022 'local' : a local tag
1034 'local' : a local tag
1023 'global' : a global tag
1035 'global' : a global tag
1024 None : tag does not exist
1036 None : tag does not exist
1025 '''
1037 '''
1026
1038
1027 return self._tagscache.tagtypes.get(tagname)
1039 return self._tagscache.tagtypes.get(tagname)
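# Illustrative sketch (editor's addition): combining the tag accessors above;
# 'v1.0' is a hypothetical tag name.
#
#     node = repo.tags().get('v1.0')   # name -> binary node, or None
#     kind = repo.tagtype('v1.0')      # 'global', 'local', or None if unknown
#     names = repo.nodetags(node) if node else []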
1028
1040
1029 def tagslist(self):
1041 def tagslist(self):
1030 '''return a list of tags ordered by revision'''
1042 '''return a list of tags ordered by revision'''
1031 if not self._tagscache.tagslist:
1043 if not self._tagscache.tagslist:
1032 l = []
1044 l = []
1033 for t, n in self.tags().iteritems():
1045 for t, n in self.tags().iteritems():
1034 l.append((self.changelog.rev(n), t, n))
1046 l.append((self.changelog.rev(n), t, n))
1035 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1047 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1036
1048
1037 return self._tagscache.tagslist
1049 return self._tagscache.tagslist
1038
1050
1039 def nodetags(self, node):
1051 def nodetags(self, node):
1040 '''return the tags associated with a node'''
1052 '''return the tags associated with a node'''
1041 if not self._tagscache.nodetagscache:
1053 if not self._tagscache.nodetagscache:
1042 nodetagscache = {}
1054 nodetagscache = {}
1043 for t, n in self._tagscache.tags.iteritems():
1055 for t, n in self._tagscache.tags.iteritems():
1044 nodetagscache.setdefault(n, []).append(t)
1056 nodetagscache.setdefault(n, []).append(t)
1045 for tags in nodetagscache.itervalues():
1057 for tags in nodetagscache.itervalues():
1046 tags.sort()
1058 tags.sort()
1047 self._tagscache.nodetagscache = nodetagscache
1059 self._tagscache.nodetagscache = nodetagscache
1048 return self._tagscache.nodetagscache.get(node, [])
1060 return self._tagscache.nodetagscache.get(node, [])
1049
1061
1050 def nodebookmarks(self, node):
1062 def nodebookmarks(self, node):
1051 """return the list of bookmarks pointing to the specified node"""
1063 """return the list of bookmarks pointing to the specified node"""
1052 return self._bookmarks.names(node)
1064 return self._bookmarks.names(node)
1053
1065
1054 def branchmap(self):
1066 def branchmap(self):
1055 '''returns a dictionary {branch: [branchheads]} with branchheads
1067 '''returns a dictionary {branch: [branchheads]} with branchheads
1056 ordered by increasing revision number'''
1068 ordered by increasing revision number'''
1057 branchmap.updatecache(self)
1069 branchmap.updatecache(self)
1058 return self._branchcaches[self.filtername]
1070 return self._branchcaches[self.filtername]
1059
1071
1060 @unfilteredmethod
1072 @unfilteredmethod
1061 def revbranchcache(self):
1073 def revbranchcache(self):
1062 if not self._revbranchcache:
1074 if not self._revbranchcache:
1063 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1075 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1064 return self._revbranchcache
1076 return self._revbranchcache
1065
1077
1066 def branchtip(self, branch, ignoremissing=False):
1078 def branchtip(self, branch, ignoremissing=False):
1067 '''return the tip node for a given branch
1079 '''return the tip node for a given branch
1068
1080
1069 If ignoremissing is True, then this method will not raise an error.
1081 If ignoremissing is True, then this method will not raise an error.
1070 This is helpful for callers that only expect None for a missing branch
1082 This is helpful for callers that only expect None for a missing branch
1071 (e.g. namespace).
1083 (e.g. namespace).
1072
1084
1073 '''
1085 '''
1074 try:
1086 try:
1075 return self.branchmap().branchtip(branch)
1087 return self.branchmap().branchtip(branch)
1076 except KeyError:
1088 except KeyError:
1077 if not ignoremissing:
1089 if not ignoremissing:
1078 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1090 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1079 else:
1091 else:
1080 pass
1092 pass
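# Illustrative sketch (editor's addition): the two calling conventions for
# branchtip(); 'maybe-branch' is a hypothetical branch name.
#
#     tip = repo.branchtip('default')   # raises RepoLookupError if missing
#     tip = repo.branchtip('maybe-branch', ignoremissing=True)  # None if missing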
1081
1093
1082 def lookup(self, key):
1094 def lookup(self, key):
1083 return scmutil.revsymbol(self, key).node()
1095 return scmutil.revsymbol(self, key).node()
1084
1096
1085 def lookupbranch(self, key):
1097 def lookupbranch(self, key):
1086 if key in self.branchmap():
1098 if key in self.branchmap():
1087 return key
1099 return key
1088
1100
1089 return scmutil.revsymbol(self, key).branch()
1101 return scmutil.revsymbol(self, key).branch()
1090
1102
1091 def known(self, nodes):
1103 def known(self, nodes):
1092 cl = self.changelog
1104 cl = self.changelog
1093 nm = cl.nodemap
1105 nm = cl.nodemap
1094 filtered = cl.filteredrevs
1106 filtered = cl.filteredrevs
1095 result = []
1107 result = []
1096 for n in nodes:
1108 for n in nodes:
1097 r = nm.get(n)
1109 r = nm.get(n)
1098 resp = not (r is None or r in filtered)
1110 resp = not (r is None or r in filtered)
1099 result.append(resp)
1111 result.append(resp)
1100 return result
1112 return result
1101
1113
1102 def local(self):
1114 def local(self):
1103 return self
1115 return self
1104
1116
1105 def publishing(self):
1117 def publishing(self):
1106 # it's safe (and desirable) to trust the publish flag unconditionally
1118 # it's safe (and desirable) to trust the publish flag unconditionally
1107 # so that we don't finalize changes shared between users via ssh or nfs
1119 # so that we don't finalize changes shared between users via ssh or nfs
1108 return self.ui.configbool('phases', 'publish', untrusted=True)
1120 return self.ui.configbool('phases', 'publish', untrusted=True)
1109
1121
1110 def cancopy(self):
1122 def cancopy(self):
1111 # so statichttprepo's override of local() works
1123 # so statichttprepo's override of local() works
1112 if not self.local():
1124 if not self.local():
1113 return False
1125 return False
1114 if not self.publishing():
1126 if not self.publishing():
1115 return True
1127 return True
1116 # if publishing we can't copy if there is filtered content
1128 # if publishing we can't copy if there is filtered content
1117 return not self.filtered('visible').changelog.filteredrevs
1129 return not self.filtered('visible').changelog.filteredrevs
1118
1130
1119 def shared(self):
1131 def shared(self):
1120 '''the type of shared repository (None if not shared)'''
1132 '''the type of shared repository (None if not shared)'''
1121 if self.sharedpath != self.path:
1133 if self.sharedpath != self.path:
1122 return 'store'
1134 return 'store'
1123 return None
1135 return None
1124
1136
1125 def wjoin(self, f, *insidef):
1137 def wjoin(self, f, *insidef):
1126 return self.vfs.reljoin(self.root, f, *insidef)
1138 return self.vfs.reljoin(self.root, f, *insidef)
1127
1139
1128 def file(self, f):
1140 def file(self, f):
1129 if f[0] == '/':
1141 if f[0] == '/':
1130 f = f[1:]
1142 f = f[1:]
1131 return filelog.filelog(self.svfs, f)
1143 return filelog.filelog(self.svfs, f)
1132
1144
1133 def setparents(self, p1, p2=nullid):
1145 def setparents(self, p1, p2=nullid):
1134 with self.dirstate.parentchange():
1146 with self.dirstate.parentchange():
1135 copies = self.dirstate.setparents(p1, p2)
1147 copies = self.dirstate.setparents(p1, p2)
1136 pctx = self[p1]
1148 pctx = self[p1]
1137 if copies:
1149 if copies:
1138 # Adjust copy records; the dirstate cannot do it, as it
1150 # Adjust copy records; the dirstate cannot do it, as it
1139 # requires access to the parents' manifests. Preserve them
1151 # requires access to the parents' manifests. Preserve them
1140 # only for entries added to the first parent.
1152 # only for entries added to the first parent.
1141 for f in copies:
1153 for f in copies:
1142 if f not in pctx and copies[f] in pctx:
1154 if f not in pctx and copies[f] in pctx:
1143 self.dirstate.copy(copies[f], f)
1155 self.dirstate.copy(copies[f], f)
1144 if p2 == nullid:
1156 if p2 == nullid:
1145 for f, s in sorted(self.dirstate.copies().items()):
1157 for f, s in sorted(self.dirstate.copies().items()):
1146 if f not in pctx and s not in pctx:
1158 if f not in pctx and s not in pctx:
1147 self.dirstate.copy(None, f)
1159 self.dirstate.copy(None, f)
1148
1160
1149 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1161 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1150 """changeid can be a changeset revision, node, or tag.
1162 """changeid can be a changeset revision, node, or tag.
1151 fileid can be a file revision or node."""
1163 fileid can be a file revision or node."""
1152 return context.filectx(self, path, changeid, fileid,
1164 return context.filectx(self, path, changeid, fileid,
1153 changectx=changectx)
1165 changectx=changectx)
1154
1166
1155 def getcwd(self):
1167 def getcwd(self):
1156 return self.dirstate.getcwd()
1168 return self.dirstate.getcwd()
1157
1169
1158 def pathto(self, f, cwd=None):
1170 def pathto(self, f, cwd=None):
1159 return self.dirstate.pathto(f, cwd)
1171 return self.dirstate.pathto(f, cwd)
1160
1172
1161 def _loadfilter(self, filter):
1173 def _loadfilter(self, filter):
1162 if filter not in self._filterpats:
1174 if filter not in self._filterpats:
1163 l = []
1175 l = []
1164 for pat, cmd in self.ui.configitems(filter):
1176 for pat, cmd in self.ui.configitems(filter):
1165 if cmd == '!':
1177 if cmd == '!':
1166 continue
1178 continue
1167 mf = matchmod.match(self.root, '', [pat])
1179 mf = matchmod.match(self.root, '', [pat])
1168 fn = None
1180 fn = None
1169 params = cmd
1181 params = cmd
1170 for name, filterfn in self._datafilters.iteritems():
1182 for name, filterfn in self._datafilters.iteritems():
1171 if cmd.startswith(name):
1183 if cmd.startswith(name):
1172 fn = filterfn
1184 fn = filterfn
1173 params = cmd[len(name):].lstrip()
1185 params = cmd[len(name):].lstrip()
1174 break
1186 break
1175 if not fn:
1187 if not fn:
1176 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1188 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1177 # Wrap old filters not supporting keyword arguments
1189 # Wrap old filters not supporting keyword arguments
1178 if not pycompat.getargspec(fn)[2]:
1190 if not pycompat.getargspec(fn)[2]:
1179 oldfn = fn
1191 oldfn = fn
1180 fn = lambda s, c, **kwargs: oldfn(s, c)
1192 fn = lambda s, c, **kwargs: oldfn(s, c)
1181 l.append((mf, fn, params))
1193 l.append((mf, fn, params))
1182 self._filterpats[filter] = l
1194 self._filterpats[filter] = l
1183 return self._filterpats[filter]
1195 return self._filterpats[filter]
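# Illustrative sketch (editor's addition): the hgrc configuration consumed by
# _loadfilter() above. Each entry maps a file pattern to a filter command;
# '!' disables a pattern, and a 'pipe:' or 'tempfile:' specifier selects how
# the command receives data. The dos2unix/unix2dos commands are assumed to be
# available as stdin/stdout filters.
#
#     [encode]
#     **.txt = pipe: dos2unix
#     [decode]
#     **.txt = pipe: unix2dos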
1184
1196
1185 def _filter(self, filterpats, filename, data):
1197 def _filter(self, filterpats, filename, data):
1186 for mf, fn, cmd in filterpats:
1198 for mf, fn, cmd in filterpats:
1187 if mf(filename):
1199 if mf(filename):
1188 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1200 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1189 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1201 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1190 break
1202 break
1191
1203
1192 return data
1204 return data
1193
1205
1194 @unfilteredpropertycache
1206 @unfilteredpropertycache
1195 def _encodefilterpats(self):
1207 def _encodefilterpats(self):
1196 return self._loadfilter('encode')
1208 return self._loadfilter('encode')
1197
1209
1198 @unfilteredpropertycache
1210 @unfilteredpropertycache
1199 def _decodefilterpats(self):
1211 def _decodefilterpats(self):
1200 return self._loadfilter('decode')
1212 return self._loadfilter('decode')
1201
1213
1202 def adddatafilter(self, name, filter):
1214 def adddatafilter(self, name, filter):
1203 self._datafilters[name] = filter
1215 self._datafilters[name] = filter
1204
1216
1205 def wread(self, filename):
1217 def wread(self, filename):
1206 if self.wvfs.islink(filename):
1218 if self.wvfs.islink(filename):
1207 data = self.wvfs.readlink(filename)
1219 data = self.wvfs.readlink(filename)
1208 else:
1220 else:
1209 data = self.wvfs.read(filename)
1221 data = self.wvfs.read(filename)
1210 return self._filter(self._encodefilterpats, filename, data)
1222 return self._filter(self._encodefilterpats, filename, data)
1211
1223
1212 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1224 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1213 """write ``data`` into ``filename`` in the working directory
1225 """write ``data`` into ``filename`` in the working directory
1214
1226
1215 This returns the length of the written (possibly decoded) data.
1227 This returns the length of the written (possibly decoded) data.
1216 """
1228 """
1217 data = self._filter(self._decodefilterpats, filename, data)
1229 data = self._filter(self._decodefilterpats, filename, data)
1218 if 'l' in flags:
1230 if 'l' in flags:
1219 self.wvfs.symlink(data, filename)
1231 self.wvfs.symlink(data, filename)
1220 else:
1232 else:
1221 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1233 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1222 **kwargs)
1234 **kwargs)
1223 if 'x' in flags:
1235 if 'x' in flags:
1224 self.wvfs.setflags(filename, False, True)
1236 self.wvfs.setflags(filename, False, True)
1225 else:
1237 else:
1226 self.wvfs.setflags(filename, False, False)
1238 self.wvfs.setflags(filename, False, False)
1227 return len(data)
1239 return len(data)
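# Illustrative sketch (editor's addition): the flag values understood by
# wwrite(); the paths and data are hypothetical.
#
#     repo.wwrite('docs/readme.txt', data, '')       # regular file
#     repo.wwrite('bin/run.sh', data, 'x')           # mark executable
#     repo.wwrite('latest', 'docs/readme.txt', 'l')  # symlink to that path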
1228
1240
1229 def wwritedata(self, filename, data):
1241 def wwritedata(self, filename, data):
1230 return self._filter(self._decodefilterpats, filename, data)
1242 return self._filter(self._decodefilterpats, filename, data)
1231
1243
1232 def currenttransaction(self):
1244 def currenttransaction(self):
1233 """return the current transaction or None if non exists"""
1245 """return the current transaction or None if non exists"""
1234 if self._transref:
1246 if self._transref:
1235 tr = self._transref()
1247 tr = self._transref()
1236 else:
1248 else:
1237 tr = None
1249 tr = None
1238
1250
1239 if tr and tr.running():
1251 if tr and tr.running():
1240 return tr
1252 return tr
1241 return None
1253 return None
1242
1254
1243 def transaction(self, desc, report=None):
1255 def transaction(self, desc, report=None):
1244 if (self.ui.configbool('devel', 'all-warnings')
1256 if (self.ui.configbool('devel', 'all-warnings')
1245 or self.ui.configbool('devel', 'check-locks')):
1257 or self.ui.configbool('devel', 'check-locks')):
1246 if self._currentlock(self._lockref) is None:
1258 if self._currentlock(self._lockref) is None:
1247 raise error.ProgrammingError('transaction requires locking')
1259 raise error.ProgrammingError('transaction requires locking')
1248 tr = self.currenttransaction()
1260 tr = self.currenttransaction()
1249 if tr is not None:
1261 if tr is not None:
1250 return tr.nest(name=desc)
1262 return tr.nest(name=desc)
1251
1263
1252 # abort here if the journal already exists
1264 # abort here if the journal already exists
1253 if self.svfs.exists("journal"):
1265 if self.svfs.exists("journal"):
1254 raise error.RepoError(
1266 raise error.RepoError(
1255 _("abandoned transaction found"),
1267 _("abandoned transaction found"),
1256 hint=_("run 'hg recover' to clean up transaction"))
1268 hint=_("run 'hg recover' to clean up transaction"))
1257
1269
1258 idbase = "%.40f#%f" % (random.random(), time.time())
1270 idbase = "%.40f#%f" % (random.random(), time.time())
1259 ha = hex(hashlib.sha1(idbase).digest())
1271 ha = hex(hashlib.sha1(idbase).digest())
1260 txnid = 'TXN:' + ha
1272 txnid = 'TXN:' + ha
1261 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1273 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1262
1274
1263 self._writejournal(desc)
1275 self._writejournal(desc)
1264 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1276 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1265 if report:
1277 if report:
1266 rp = report
1278 rp = report
1267 else:
1279 else:
1268 rp = self.ui.warn
1280 rp = self.ui.warn
1269 vfsmap = {'plain': self.vfs} # root of .hg/
1281 vfsmap = {'plain': self.vfs} # root of .hg/
1270 # we must avoid cyclic reference between repo and transaction.
1282 # we must avoid cyclic reference between repo and transaction.
1271 reporef = weakref.ref(self)
1283 reporef = weakref.ref(self)
1272 # Code to track tag movement
1284 # Code to track tag movement
1273 #
1285 #
1274 # Since tags are all handled as file content, it is actually quite hard
1286 # Since tags are all handled as file content, it is actually quite hard
1275 # to track these movements from a code perspective. So we fall back to
1287 # to track these movements from a code perspective. So we fall back to
1276 # tracking at the repository level. One could envision tracking changes
1288 # tracking at the repository level. One could envision tracking changes
1277 # to the '.hgtags' file through changegroup apply but that fails to
1289 # to the '.hgtags' file through changegroup apply but that fails to
1278 # cope with cases where a transaction exposes new heads without a
1290 # cope with cases where a transaction exposes new heads without a
1279 # changegroup being involved (e.g. phase movement).
1291 # changegroup being involved (e.g. phase movement).
1280 #
1292 #
1281 # For now, we gate the feature behind a flag since this likely comes
1293 # For now, we gate the feature behind a flag since this likely comes
1282 # with performance impacts. The current code runs more often than needed
1294 # with performance impacts. The current code runs more often than needed
1283 # and does not use caches as much as it could. The current focus is on
1295 # and does not use caches as much as it could. The current focus is on
1284 # the behavior of the feature so we disable it by default. The flag
1296 # the behavior of the feature so we disable it by default. The flag
1285 # will be removed when we are happy with the performance impact.
1297 # will be removed when we are happy with the performance impact.
1286 #
1298 #
1287 # Once this feature is no longer experimental move the following
1299 # Once this feature is no longer experimental move the following
1288 # documentation to the appropriate help section:
1300 # documentation to the appropriate help section:
1289 #
1301 #
1290 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1302 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1291 # tags (new or changed or deleted tags). In addition the details of
1303 # tags (new or changed or deleted tags). In addition the details of
1292 # these changes are made available in a file at:
1304 # these changes are made available in a file at:
1293 # ``REPOROOT/.hg/changes/tags.changes``.
1305 # ``REPOROOT/.hg/changes/tags.changes``.
1294 # Make sure you check for HG_TAG_MOVED before reading that file as it
1306 # Make sure you check for HG_TAG_MOVED before reading that file as it
1295 # might exist from a previous transaction even if no tags were touched
1307 # might exist from a previous transaction even if no tags were touched
1296 # in this one. Changes are recorded in a line-based format::
1308 # in this one. Changes are recorded in a line-based format::
1297 #
1309 #
1298 # <action> <hex-node> <tag-name>\n
1310 # <action> <hex-node> <tag-name>\n
1299 #
1311 #
1300 # Actions are defined as follows:
1312 # Actions are defined as follows:
1301 # "-R": tag is removed,
1313 # "-R": tag is removed,
1302 # "+A": tag is added,
1314 # "+A": tag is added,
1303 # "-M": tag is moved (old value),
1315 # "-M": tag is moved (old value),
1304 # "+M": tag is moved (new value),
1316 # "+M": tag is moved (new value),
1305 tracktags = lambda x: None
1317 tracktags = lambda x: None
1306 # experimental config: experimental.hook-track-tags
1318 # experimental config: experimental.hook-track-tags
1307 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1319 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1308 if desc != 'strip' and shouldtracktags:
1320 if desc != 'strip' and shouldtracktags:
1309 oldheads = self.changelog.headrevs()
1321 oldheads = self.changelog.headrevs()
1310 def tracktags(tr2):
1322 def tracktags(tr2):
1311 repo = reporef()
1323 repo = reporef()
1312 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1324 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1313 newheads = repo.changelog.headrevs()
1325 newheads = repo.changelog.headrevs()
1314 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1326 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1315 # note: we compare lists here.
1327 # note: we compare lists here.
1316 # As we do it only once, building sets would not be cheaper
1328 # As we do it only once, building sets would not be cheaper
1317 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1329 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1318 if changes:
1330 if changes:
1319 tr2.hookargs['tag_moved'] = '1'
1331 tr2.hookargs['tag_moved'] = '1'
1320 with repo.vfs('changes/tags.changes', 'w',
1332 with repo.vfs('changes/tags.changes', 'w',
1321 atomictemp=True) as changesfile:
1333 atomictemp=True) as changesfile:
1322 # note: we do not register the file to the transaction
1334 # note: we do not register the file to the transaction
1323 # because we need it to still exist when the transaction
1335 # because we need it to still exist when the transaction
1324 # is closed (for txnclose hooks)
1336 # is closed (for txnclose hooks)
1325 tagsmod.writediff(changesfile, changes)
1337 tagsmod.writediff(changesfile, changes)
1326 def validate(tr2):
1338 def validate(tr2):
1327 """will run pre-closing hooks"""
1339 """will run pre-closing hooks"""
1328 # XXX the transaction API is a bit lacking here so we take a hacky
1340 # XXX the transaction API is a bit lacking here so we take a hacky
1329 # path for now
1341 # path for now
1330 #
1342 #
1331 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1343 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1332 # dict is copied before these run. In addition we need the data
1344 # dict is copied before these run. In addition we need the data
1333 # available to in-memory hooks too.
1345 # available to in-memory hooks too.
1334 #
1346 #
1335 # Moreover, we also need to make sure this runs before txnclose
1347 # Moreover, we also need to make sure this runs before txnclose
1336 # hooks and there is no "pending" mechanism that would execute
1348 # hooks and there is no "pending" mechanism that would execute
1337 # logic only if hooks are about to run.
1349 # logic only if hooks are about to run.
1338 #
1350 #
1339 # Fixing this limitation of the transaction is also needed to track
1351 # Fixing this limitation of the transaction is also needed to track
1340 # other families of changes (bookmarks, phases, obsolescence).
1352 # other families of changes (bookmarks, phases, obsolescence).
1341 #
1353 #
1342 # This will have to be fixed before we remove the experimental
1354 # This will have to be fixed before we remove the experimental
1343 # gating.
1355 # gating.
1344 tracktags(tr2)
1356 tracktags(tr2)
1345 repo = reporef()
1357 repo = reporef()
1346 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1358 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1347 scmutil.enforcesinglehead(repo, tr2, desc)
1359 scmutil.enforcesinglehead(repo, tr2, desc)
1348 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1360 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1349 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1361 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1350 args = tr.hookargs.copy()
1362 args = tr.hookargs.copy()
1351 args.update(bookmarks.preparehookargs(name, old, new))
1363 args.update(bookmarks.preparehookargs(name, old, new))
1352 repo.hook('pretxnclose-bookmark', throw=True,
1364 repo.hook('pretxnclose-bookmark', throw=True,
1353 txnname=desc,
1365 txnname=desc,
1354 **pycompat.strkwargs(args))
1366 **pycompat.strkwargs(args))
1355 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1367 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1356 cl = repo.unfiltered().changelog
1368 cl = repo.unfiltered().changelog
1357 for rev, (old, new) in tr.changes['phases'].items():
1369 for rev, (old, new) in tr.changes['phases'].items():
1358 args = tr.hookargs.copy()
1370 args = tr.hookargs.copy()
1359 node = hex(cl.node(rev))
1371 node = hex(cl.node(rev))
1360 args.update(phases.preparehookargs(node, old, new))
1372 args.update(phases.preparehookargs(node, old, new))
1361 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1373 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1362 **pycompat.strkwargs(args))
1374 **pycompat.strkwargs(args))
1363
1375
1364 repo.hook('pretxnclose', throw=True,
1376 repo.hook('pretxnclose', throw=True,
1365 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1377 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1366 def releasefn(tr, success):
1378 def releasefn(tr, success):
1367 repo = reporef()
1379 repo = reporef()
1368 if success:
1380 if success:
1369 # this should be explicitly invoked here, because
1381 # this should be explicitly invoked here, because
1370 # in-memory changes aren't written out when closing the
1382 # in-memory changes aren't written out when closing the
1371 # transaction, if tr.addfilegenerator (via
1383 # transaction, if tr.addfilegenerator (via
1372 # dirstate.write or so) isn't invoked while the
1384 # dirstate.write or so) isn't invoked while the
1373 # transaction is running
1385 # transaction is running
1374 repo.dirstate.write(None)
1386 repo.dirstate.write(None)
1375 else:
1387 else:
1376 # discard all changes (including ones already written
1388 # discard all changes (including ones already written
1377 # out) in this transaction
1389 # out) in this transaction
1378 narrowspec.restorebackup(self, 'journal.narrowspec')
1390 narrowspec.restorebackup(self, 'journal.narrowspec')
1379 repo.dirstate.restorebackup(None, 'journal.dirstate')
1391 repo.dirstate.restorebackup(None, 'journal.dirstate')
1380
1392
1381 repo.invalidate(clearfilecache=True)
1393 repo.invalidate(clearfilecache=True)
1382
1394
1383 tr = transaction.transaction(rp, self.svfs, vfsmap,
1395 tr = transaction.transaction(rp, self.svfs, vfsmap,
1384 "journal",
1396 "journal",
1385 "undo",
1397 "undo",
1386 aftertrans(renames),
1398 aftertrans(renames),
1387 self.store.createmode,
1399 self.store.createmode,
1388 validator=validate,
1400 validator=validate,
1389 releasefn=releasefn,
1401 releasefn=releasefn,
1390 checkambigfiles=_cachedfiles,
1402 checkambigfiles=_cachedfiles,
1391 name=desc)
1403 name=desc)
1392 tr.changes['origrepolen'] = len(self)
1404 tr.changes['origrepolen'] = len(self)
1393 tr.changes['obsmarkers'] = set()
1405 tr.changes['obsmarkers'] = set()
1394 tr.changes['phases'] = {}
1406 tr.changes['phases'] = {}
1395 tr.changes['bookmarks'] = {}
1407 tr.changes['bookmarks'] = {}
1396
1408
1397 tr.hookargs['txnid'] = txnid
1409 tr.hookargs['txnid'] = txnid
1398 # note: writing the fncache only during finalize means that the file is
1410 # note: writing the fncache only during finalize means that the file is
1399 # outdated when running hooks. As fncache is used for streaming clone,
1411 # outdated when running hooks. As fncache is used for streaming clone,
1400 # this is not expected to break anything that happens during the hooks.
1412 # this is not expected to break anything that happens during the hooks.
1401 tr.addfinalize('flush-fncache', self.store.write)
1413 tr.addfinalize('flush-fncache', self.store.write)
1402 def txnclosehook(tr2):
1414 def txnclosehook(tr2):
1403 """To be run if transaction is successful, will schedule a hook run
1415 """To be run if transaction is successful, will schedule a hook run
1404 """
1416 """
1405 # Don't reference tr2 in hook() so we don't hold a reference.
1417 # Don't reference tr2 in hook() so we don't hold a reference.
1406 # This reduces memory consumption when there are multiple
1418 # This reduces memory consumption when there are multiple
1407 # transactions per lock. This can likely go away if issue5045
1419 # transactions per lock. This can likely go away if issue5045
1408 # fixes the function accumulation.
1420 # fixes the function accumulation.
1409 hookargs = tr2.hookargs
1421 hookargs = tr2.hookargs
1410
1422
1411 def hookfunc():
1423 def hookfunc():
1412 repo = reporef()
1424 repo = reporef()
1413 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1425 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1414 bmchanges = sorted(tr.changes['bookmarks'].items())
1426 bmchanges = sorted(tr.changes['bookmarks'].items())
1415 for name, (old, new) in bmchanges:
1427 for name, (old, new) in bmchanges:
1416 args = tr.hookargs.copy()
1428 args = tr.hookargs.copy()
1417 args.update(bookmarks.preparehookargs(name, old, new))
1429 args.update(bookmarks.preparehookargs(name, old, new))
1418 repo.hook('txnclose-bookmark', throw=False,
1430 repo.hook('txnclose-bookmark', throw=False,
1419 txnname=desc, **pycompat.strkwargs(args))
1431 txnname=desc, **pycompat.strkwargs(args))
1420
1432
1421 if hook.hashook(repo.ui, 'txnclose-phase'):
1433 if hook.hashook(repo.ui, 'txnclose-phase'):
1422 cl = repo.unfiltered().changelog
1434 cl = repo.unfiltered().changelog
1423 phasemv = sorted(tr.changes['phases'].items())
1435 phasemv = sorted(tr.changes['phases'].items())
1424 for rev, (old, new) in phasemv:
1436 for rev, (old, new) in phasemv:
1425 args = tr.hookargs.copy()
1437 args = tr.hookargs.copy()
1426 node = hex(cl.node(rev))
1438 node = hex(cl.node(rev))
1427 args.update(phases.preparehookargs(node, old, new))
1439 args.update(phases.preparehookargs(node, old, new))
1428 repo.hook('txnclose-phase', throw=False, txnname=desc,
1440 repo.hook('txnclose-phase', throw=False, txnname=desc,
1429 **pycompat.strkwargs(args))
1441 **pycompat.strkwargs(args))
1430
1442
1431 repo.hook('txnclose', throw=False, txnname=desc,
1443 repo.hook('txnclose', throw=False, txnname=desc,
1432 **pycompat.strkwargs(hookargs))
1444 **pycompat.strkwargs(hookargs))
1433 reporef()._afterlock(hookfunc)
1445 reporef()._afterlock(hookfunc)
1434 tr.addfinalize('txnclose-hook', txnclosehook)
1446 tr.addfinalize('txnclose-hook', txnclosehook)
1435 # Include a leading "-" to make it happen before the transaction summary
1447 # Include a leading "-" to make it happen before the transaction summary
1436 # reports registered via scmutil.registersummarycallback() whose names
1448 # reports registered via scmutil.registersummarycallback() whose names
1437 # are 00-txnreport etc. That way, the caches will be warm when the
1449 # are 00-txnreport etc. That way, the caches will be warm when the
1438 # callbacks run.
1450 # callbacks run.
1439 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1451 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1440 def txnaborthook(tr2):
1452 def txnaborthook(tr2):
1441 """To be run if transaction is aborted
1453 """To be run if transaction is aborted
1442 """
1454 """
1443 reporef().hook('txnabort', throw=False, txnname=desc,
1455 reporef().hook('txnabort', throw=False, txnname=desc,
1444 **pycompat.strkwargs(tr2.hookargs))
1456 **pycompat.strkwargs(tr2.hookargs))
1445 tr.addabort('txnabort-hook', txnaborthook)
1457 tr.addabort('txnabort-hook', txnaborthook)
1446 # avoid eager cache invalidation. in-memory data should be identical
1458 # avoid eager cache invalidation. in-memory data should be identical
1447 # to stored data if transaction has no error.
1459 # to stored data if transaction has no error.
1448 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1460 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1449 self._transref = weakref.ref(tr)
1461 self._transref = weakref.ref(tr)
1450 scmutil.registersummarycallback(self, tr, desc)
1462 scmutil.registersummarycallback(self, tr, desc)
1451 return tr
1463 return tr
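# Illustrative sketch (editor's addition): the usual calling pattern for
# transaction(), assuming the caller already holds the store lock as the
# devel check above requires; 'my-operation' is a hypothetical description.
#
#     with repo.lock():
#         tr = repo.transaction('my-operation')
#         try:
#             ...  # write to the store through the transaction
#             tr.close()
#         finally:
#             tr.release()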
1452
1464
1453 def _journalfiles(self):
1465 def _journalfiles(self):
1454 return ((self.svfs, 'journal'),
1466 return ((self.svfs, 'journal'),
1455 (self.vfs, 'journal.dirstate'),
1467 (self.vfs, 'journal.dirstate'),
1456 (self.vfs, 'journal.branch'),
1468 (self.vfs, 'journal.branch'),
1457 (self.vfs, 'journal.desc'),
1469 (self.vfs, 'journal.desc'),
1458 (self.vfs, 'journal.bookmarks'),
1470 (self.vfs, 'journal.bookmarks'),
1459 (self.svfs, 'journal.phaseroots'))
1471 (self.svfs, 'journal.phaseroots'))
1460
1472
1461 def undofiles(self):
1473 def undofiles(self):
1462 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1474 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1463
1475
1464 @unfilteredmethod
1476 @unfilteredmethod
1465 def _writejournal(self, desc):
1477 def _writejournal(self, desc):
1466 self.dirstate.savebackup(None, 'journal.dirstate')
1478 self.dirstate.savebackup(None, 'journal.dirstate')
1467 narrowspec.savebackup(self, 'journal.narrowspec')
1479 narrowspec.savebackup(self, 'journal.narrowspec')
1468 self.vfs.write("journal.branch",
1480 self.vfs.write("journal.branch",
1469 encoding.fromlocal(self.dirstate.branch()))
1481 encoding.fromlocal(self.dirstate.branch()))
1470 self.vfs.write("journal.desc",
1482 self.vfs.write("journal.desc",
1471 "%d\n%s\n" % (len(self), desc))
1483 "%d\n%s\n" % (len(self), desc))
1472 self.vfs.write("journal.bookmarks",
1484 self.vfs.write("journal.bookmarks",
1473 self.vfs.tryread("bookmarks"))
1485 self.vfs.tryread("bookmarks"))
1474 self.svfs.write("journal.phaseroots",
1486 self.svfs.write("journal.phaseroots",
1475 self.svfs.tryread("phaseroots"))
1487 self.svfs.tryread("phaseroots"))
1476
1488
1477 def recover(self):
1489 def recover(self):
1478 with self.lock():
1490 with self.lock():
1479 if self.svfs.exists("journal"):
1491 if self.svfs.exists("journal"):
1480 self.ui.status(_("rolling back interrupted transaction\n"))
1492 self.ui.status(_("rolling back interrupted transaction\n"))
1481 vfsmap = {'': self.svfs,
1493 vfsmap = {'': self.svfs,
1482 'plain': self.vfs,}
1494 'plain': self.vfs,}
1483 transaction.rollback(self.svfs, vfsmap, "journal",
1495 transaction.rollback(self.svfs, vfsmap, "journal",
1484 self.ui.warn,
1496 self.ui.warn,
1485 checkambigfiles=_cachedfiles)
1497 checkambigfiles=_cachedfiles)
1486 self.invalidate()
1498 self.invalidate()
1487 return True
1499 return True
1488 else:
1500 else:
1489 self.ui.warn(_("no interrupted transaction available\n"))
1501 self.ui.warn(_("no interrupted transaction available\n"))
1490 return False
1502 return False
1491
1503
1492 def rollback(self, dryrun=False, force=False):
1504 def rollback(self, dryrun=False, force=False):
1493 wlock = lock = dsguard = None
1505 wlock = lock = dsguard = None
1494 try:
1506 try:
1495 wlock = self.wlock()
1507 wlock = self.wlock()
1496 lock = self.lock()
1508 lock = self.lock()
1497 if self.svfs.exists("undo"):
1509 if self.svfs.exists("undo"):
1498 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1510 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1499
1511
1500 return self._rollback(dryrun, force, dsguard)
1512 return self._rollback(dryrun, force, dsguard)
1501 else:
1513 else:
1502 self.ui.warn(_("no rollback information available\n"))
1514 self.ui.warn(_("no rollback information available\n"))
1503 return 1
1515 return 1
1504 finally:
1516 finally:
1505 release(dsguard, lock, wlock)
1517 release(dsguard, lock, wlock)
1506
1518
1507 @unfilteredmethod # Until we get smarter cache management
1519 @unfilteredmethod # Until we get smarter cache management
1508 def _rollback(self, dryrun, force, dsguard):
1520 def _rollback(self, dryrun, force, dsguard):
1509 ui = self.ui
1521 ui = self.ui
1510 try:
1522 try:
1511 args = self.vfs.read('undo.desc').splitlines()
1523 args = self.vfs.read('undo.desc').splitlines()
1512 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1524 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1513 if len(args) >= 3:
1525 if len(args) >= 3:
1514 detail = args[2]
1526 detail = args[2]
1515 oldtip = oldlen - 1
1527 oldtip = oldlen - 1
1516
1528
1517 if detail and ui.verbose:
1529 if detail and ui.verbose:
1518 msg = (_('repository tip rolled back to revision %d'
1530 msg = (_('repository tip rolled back to revision %d'
1519 ' (undo %s: %s)\n')
1531 ' (undo %s: %s)\n')
1520 % (oldtip, desc, detail))
1532 % (oldtip, desc, detail))
1521 else:
1533 else:
1522 msg = (_('repository tip rolled back to revision %d'
1534 msg = (_('repository tip rolled back to revision %d'
1523 ' (undo %s)\n')
1535 ' (undo %s)\n')
1524 % (oldtip, desc))
1536 % (oldtip, desc))
1525 except IOError:
1537 except IOError:
1526 msg = _('rolling back unknown transaction\n')
1538 msg = _('rolling back unknown transaction\n')
1527 desc = None
1539 desc = None
1528
1540
1529 if not force and self['.'] != self['tip'] and desc == 'commit':
1541 if not force and self['.'] != self['tip'] and desc == 'commit':
1530 raise error.Abort(
1542 raise error.Abort(
1531 _('rollback of last commit while not checked out '
1543 _('rollback of last commit while not checked out '
1532 'may lose data'), hint=_('use -f to force'))
1544 'may lose data'), hint=_('use -f to force'))
1533
1545
1534 ui.status(msg)
1546 ui.status(msg)
1535 if dryrun:
1547 if dryrun:
1536 return 0
1548 return 0
1537
1549
1538 parents = self.dirstate.parents()
1550 parents = self.dirstate.parents()
1539 self.destroying()
1551 self.destroying()
1540 vfsmap = {'plain': self.vfs, '': self.svfs}
1552 vfsmap = {'plain': self.vfs, '': self.svfs}
1541 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1553 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1542 checkambigfiles=_cachedfiles)
1554 checkambigfiles=_cachedfiles)
1543 if self.vfs.exists('undo.bookmarks'):
1555 if self.vfs.exists('undo.bookmarks'):
1544 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1556 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1545 if self.svfs.exists('undo.phaseroots'):
1557 if self.svfs.exists('undo.phaseroots'):
1546 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1558 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1547 self.invalidate()
1559 self.invalidate()
1548
1560
1549 parentgone = (parents[0] not in self.changelog.nodemap or
1561 parentgone = (parents[0] not in self.changelog.nodemap or
1550 parents[1] not in self.changelog.nodemap)
1562 parents[1] not in self.changelog.nodemap)
1551 if parentgone:
1563 if parentgone:
1552 # prevent dirstateguard from overwriting already restored one
1564 # prevent dirstateguard from overwriting already restored one
1553 dsguard.close()
1565 dsguard.close()
1554
1566
1555 narrowspec.restorebackup(self, 'undo.narrowspec')
1567 narrowspec.restorebackup(self, 'undo.narrowspec')
1556 self.dirstate.restorebackup(None, 'undo.dirstate')
1568 self.dirstate.restorebackup(None, 'undo.dirstate')
1557 try:
1569 try:
1558 branch = self.vfs.read('undo.branch')
1570 branch = self.vfs.read('undo.branch')
1559 self.dirstate.setbranch(encoding.tolocal(branch))
1571 self.dirstate.setbranch(encoding.tolocal(branch))
1560 except IOError:
1572 except IOError:
1561 ui.warn(_('named branch could not be reset: '
1573 ui.warn(_('named branch could not be reset: '
1562 'current branch is still \'%s\'\n')
1574 'current branch is still \'%s\'\n')
1563 % self.dirstate.branch())
1575 % self.dirstate.branch())
1564
1576
1565 parents = tuple([p.rev() for p in self[None].parents()])
1577 parents = tuple([p.rev() for p in self[None].parents()])
1566 if len(parents) > 1:
1578 if len(parents) > 1:
1567 ui.status(_('working directory now based on '
1579 ui.status(_('working directory now based on '
1568 'revisions %d and %d\n') % parents)
1580 'revisions %d and %d\n') % parents)
1569 else:
1581 else:
1570 ui.status(_('working directory now based on '
1582 ui.status(_('working directory now based on '
1571 'revision %d\n') % parents)
1583 'revision %d\n') % parents)
1572 mergemod.mergestate.clean(self, self['.'].node())
1584 mergemod.mergestate.clean(self, self['.'].node())
1573
1585
1574 # TODO: if we know which new heads may result from this rollback, pass
1586 # TODO: if we know which new heads may result from this rollback, pass
1575 # them to destroy(), which will prevent the branchhead cache from being
1587 # them to destroy(), which will prevent the branchhead cache from being
1576 # invalidated.
1588 # invalidated.
1577 self.destroyed()
1589 self.destroyed()
1578 return 0
1590 return 0
1579
1591
1580 def _buildcacheupdater(self, newtransaction):
1592 def _buildcacheupdater(self, newtransaction):
1581 """called during transaction to build the callback updating cache
1593 """called during transaction to build the callback updating cache
1582
1594
1583 Lives on the repository to help extensions that might want to augment
1595 Lives on the repository to help extensions that might want to augment
1584 this logic. For this purpose, the created transaction is passed to the
1596 this logic. For this purpose, the created transaction is passed to the
1585 method.
1597 method.
1586 """
1598 """
1587 # we must avoid cyclic reference between repo and transaction.
1599 # we must avoid cyclic reference between repo and transaction.
1588 reporef = weakref.ref(self)
1600 reporef = weakref.ref(self)
1589 def updater(tr):
1601 def updater(tr):
1590 repo = reporef()
1602 repo = reporef()
1591 repo.updatecaches(tr)
1603 repo.updatecaches(tr)
1592 return updater
1604 return updater
1593
1605
1594 @unfilteredmethod
1606 @unfilteredmethod
1595 def updatecaches(self, tr=None, full=False):
1607 def updatecaches(self, tr=None, full=False):
1596 """warm appropriate caches
1608 """warm appropriate caches
1597
1609
1598 If this function is called after a transaction closed, the transaction
1610 If this function is called after a transaction closed, the transaction
1599 will be available in the 'tr' argument. This can be used to selectively
1611 will be available in the 'tr' argument. This can be used to selectively
1600 update caches relevant to the changes in that transaction.
1612 update caches relevant to the changes in that transaction.
1601
1613
1602 If 'full' is set, make sure all caches the function knows about have
1614 If 'full' is set, make sure all caches the function knows about have
1603 up-to-date data, even the ones usually loaded more lazily.
1615 up-to-date data, even the ones usually loaded more lazily.
1604 """
1616 """
1605 if tr is not None and tr.hookargs.get('source') == 'strip':
1617 if tr is not None and tr.hookargs.get('source') == 'strip':
1606 # During strip, many caches are invalid but
1618 # During strip, many caches are invalid but
1607 # later call to `destroyed` will refresh them.
1619 # later call to `destroyed` will refresh them.
1608 return
1620 return
1609
1621
1610 if tr is None or tr.changes['origrepolen'] < len(self):
1622 if tr is None or tr.changes['origrepolen'] < len(self):
1611 # updating the unfiltered branchmap should refresh all the others,
1623 # updating the unfiltered branchmap should refresh all the others,
1612 self.ui.debug('updating the branch cache\n')
1624 self.ui.debug('updating the branch cache\n')
1613 branchmap.updatecache(self.filtered('served'))
1625 branchmap.updatecache(self.filtered('served'))
1614
1626
1615 if full:
1627 if full:
1616 rbc = self.revbranchcache()
1628 rbc = self.revbranchcache()
1617 for r in self.changelog:
1629 for r in self.changelog:
1618 rbc.branchinfo(r)
1630 rbc.branchinfo(r)
1619 rbc.write()
1631 rbc.write()
1620
1632
1621 # ensure the working copy parents are in the manifestfulltextcache
1633 # ensure the working copy parents are in the manifestfulltextcache
1622 for ctx in self['.'].parents():
1634 for ctx in self['.'].parents():
1623 ctx.manifest() # accessing the manifest is enough
1635 ctx.manifest() # accessing the manifest is enough
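# Illustrative sketch (editor's addition): forcing a full cache warm-up,
# roughly what a debug command would do. Taking both locks first is an
# assumption made here, since the cache files live in the store.
#
#     with repo.wlock(), repo.lock():
#         repo.updatecaches(full=True)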
1624
1636
1625 def invalidatecaches(self):
1637 def invalidatecaches(self):
1626
1638
1627 if '_tagscache' in vars(self):
1639 if '_tagscache' in vars(self):
1628 # can't use delattr on proxy
1640 # can't use delattr on proxy
1629 del self.__dict__['_tagscache']
1641 del self.__dict__['_tagscache']
1630
1642
1631 self.unfiltered()._branchcaches.clear()
1643 self.unfiltered()._branchcaches.clear()
1632 self.invalidatevolatilesets()
1644 self.invalidatevolatilesets()
1633 self._sparsesignaturecache.clear()
1645 self._sparsesignaturecache.clear()
1634
1646
1635 def invalidatevolatilesets(self):
1647 def invalidatevolatilesets(self):
1636 self.filteredrevcache.clear()
1648 self.filteredrevcache.clear()
1637 obsolete.clearobscaches(self)
1649 obsolete.clearobscaches(self)
1638
1650
1639 def invalidatedirstate(self):
1651 def invalidatedirstate(self):
1640 '''Invalidates the dirstate, causing the next call to dirstate
1652 '''Invalidates the dirstate, causing the next call to dirstate
1641 to check if it was modified since the last time it was read,
1653 to check if it was modified since the last time it was read,
1642 rereading it if it has.
1654 rereading it if it has.
1643
1655
1644 This is different from dirstate.invalidate() in that it doesn't always
1656 This is different from dirstate.invalidate() in that it doesn't always
1645 reread the dirstate. Use dirstate.invalidate() if you want to
1657 reread the dirstate. Use dirstate.invalidate() if you want to
1646 explicitly read the dirstate again (i.e. restoring it to a previous
1658 explicitly read the dirstate again (i.e. restoring it to a previous
1647 known good state).'''
1659 known good state).'''
1648 if hasunfilteredcache(self, 'dirstate'):
1660 if hasunfilteredcache(self, 'dirstate'):
1649 for k in self.dirstate._filecache:
1661 for k in self.dirstate._filecache:
1650 try:
1662 try:
1651 delattr(self.dirstate, k)
1663 delattr(self.dirstate, k)
1652 except AttributeError:
1664 except AttributeError:
1653 pass
1665 pass
1654 delattr(self.unfiltered(), 'dirstate')
1666 delattr(self.unfiltered(), 'dirstate')
1655
1667
1656 def invalidate(self, clearfilecache=False):
1668 def invalidate(self, clearfilecache=False):
1657 '''Invalidates both store and non-store parts other than dirstate
1669 '''Invalidates both store and non-store parts other than dirstate
1658
1670
1659 If a transaction is running, invalidation of store is omitted,
1671 If a transaction is running, invalidation of store is omitted,
1660 because discarding in-memory changes might cause inconsistency
1672 because discarding in-memory changes might cause inconsistency
1661 (e.g. an incomplete fncache causes unintentional failure, but
1673 (e.g. an incomplete fncache causes unintentional failure, but
1662 a redundant one doesn't).
1674 a redundant one doesn't).
1663 '''
1675 '''
1664 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1676 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1665 for k in list(self._filecache.keys()):
1677 for k in list(self._filecache.keys()):
1666 # dirstate is invalidated separately in invalidatedirstate()
1678 # dirstate is invalidated separately in invalidatedirstate()
1667 if k == 'dirstate':
1679 if k == 'dirstate':
1668 continue
1680 continue
1669 if (k == 'changelog' and
1681 if (k == 'changelog' and
1670 self.currenttransaction() and
1682 self.currenttransaction() and
1671 self.changelog._delayed):
1683 self.changelog._delayed):
1672 # The changelog object may store unwritten revisions. We don't
1684 # The changelog object may store unwritten revisions. We don't
1673 # want to lose them.
1685 # want to lose them.
1674 # TODO: Solve the problem instead of working around it.
1686 # TODO: Solve the problem instead of working around it.
1675 continue
1687 continue
1676
1688
1677 if clearfilecache:
1689 if clearfilecache:
1678 del self._filecache[k]
1690 del self._filecache[k]
1679 try:
1691 try:
1680 delattr(unfiltered, k)
1692 delattr(unfiltered, k)
1681 except AttributeError:
1693 except AttributeError:
1682 pass
1694 pass
1683 self.invalidatecaches()
1695 self.invalidatecaches()
1684 if not self.currenttransaction():
1696 if not self.currenttransaction():
1685 # TODO: Changing contents of store outside transaction
1697 # TODO: Changing contents of store outside transaction
1686 # causes inconsistency. We should make in-memory store
1698 # causes inconsistency. We should make in-memory store
1687 # changes detectable, and abort if changed.
1699 # changes detectable, and abort if changed.
1688 self.store.invalidatecaches()
1700 self.store.invalidatecaches()
1689
1701
1690 def invalidateall(self):
1702 def invalidateall(self):
1691 '''Fully invalidates both store and non-store parts, causing the
1703 '''Fully invalidates both store and non-store parts, causing the
1692 subsequent operation to reread any outside changes.'''
1704 subsequent operation to reread any outside changes.'''
1693 # extensions should hook this to invalidate their caches
1705 # extensions should hook this to invalidate their caches
1694 self.invalidate()
1706 self.invalidate()
1695 self.invalidatedirstate()
1707 self.invalidatedirstate()
1696
1708
1697 @unfilteredmethod
1709 @unfilteredmethod
1698 def _refreshfilecachestats(self, tr):
1710 def _refreshfilecachestats(self, tr):
1699 """Reload stats of cached files so that they are flagged as valid"""
1711 """Reload stats of cached files so that they are flagged as valid"""
1700 for k, ce in self._filecache.items():
1712 for k, ce in self._filecache.items():
1701 k = pycompat.sysstr(k)
1713 k = pycompat.sysstr(k)
1702 if k == r'dirstate' or k not in self.__dict__:
1714 if k == r'dirstate' or k not in self.__dict__:
1703 continue
1715 continue
1704 ce.refresh()
1716 ce.refresh()
1705
1717
1706 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1718 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1707 inheritchecker=None, parentenvvar=None):
1719 inheritchecker=None, parentenvvar=None):
1708 parentlock = None
1720 parentlock = None
1709 # the contents of parentenvvar are used by the underlying lock to
1721 # the contents of parentenvvar are used by the underlying lock to
1710 # determine whether it can be inherited
1722 # determine whether it can be inherited
1711 if parentenvvar is not None:
1723 if parentenvvar is not None:
1712 parentlock = encoding.environ.get(parentenvvar)
1724 parentlock = encoding.environ.get(parentenvvar)
1713
1725
1714 timeout = 0
1726 timeout = 0
1715 warntimeout = 0
1727 warntimeout = 0
1716 if wait:
1728 if wait:
1717 timeout = self.ui.configint("ui", "timeout")
1729 timeout = self.ui.configint("ui", "timeout")
1718 warntimeout = self.ui.configint("ui", "timeout.warn")
1730 warntimeout = self.ui.configint("ui", "timeout.warn")
1719 # internal config: ui.signal-safe-lock
1731 # internal config: ui.signal-safe-lock
1720 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1732 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1721
1733
1722 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1734 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1723 releasefn=releasefn,
1735 releasefn=releasefn,
1724 acquirefn=acquirefn, desc=desc,
1736 acquirefn=acquirefn, desc=desc,
1725 inheritchecker=inheritchecker,
1737 inheritchecker=inheritchecker,
1726 parentlock=parentlock,
1738 parentlock=parentlock,
1727 signalsafe=signalsafe)
1739 signalsafe=signalsafe)
1728 return l
1740 return l
1729
1741
1730 def _afterlock(self, callback):
1742 def _afterlock(self, callback):
1731 """add a callback to be run when the repository is fully unlocked
1743 """add a callback to be run when the repository is fully unlocked
1732
1744
1733 The callback will be executed when the outermost lock is released
1745 The callback will be executed when the outermost lock is released
1734 (with wlock being higher level than 'lock')."""
1746 (with wlock being higher level than 'lock')."""
1735 for ref in (self._wlockref, self._lockref):
1747 for ref in (self._wlockref, self._lockref):
1736 l = ref and ref()
1748 l = ref and ref()
1737 if l and l.held:
1749 if l and l.held:
1738 l.postrelease.append(callback)
1750 l.postrelease.append(callback)
1739 break
1751 break
1740 else: # no lock has been found.
1752 else: # no lock has been found.
1741 callback()
1753 callback()
1742
1754
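# _afterlock() defers work until the outermost lock is released. A standalone
# toy sketch of that mechanism follows (the ToyLock class is illustrative
# only, not Mercurial's lock implementation): callbacks collected in
# `postrelease` run exactly once, when the nesting count drops back to zero.
class _ToyLock(object):
    def __init__(self):
        self.held = 0
        self.postrelease = []

    def lock(self):
        self.held += 1

    def release(self):
        self.held -= 1
        if self.held == 0:
            # outermost release: run the deferred callbacks
            for callback in self.postrelease:
                callback()
            del self.postrelease[:]

def _toy_lock_demo():
    events = []
    l = _ToyLock()
    l.lock()
    l.lock()                                  # nested acquisition
    l.postrelease.append(lambda: events.append('unlocked'))
    l.release()                               # still held once: nothing runs
    l.release()                               # now the callback fires
    return events                             # ['unlocked']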
1743 def lock(self, wait=True):
1755 def lock(self, wait=True):
1744 '''Lock the repository store (.hg/store) and return a weak reference
1756 '''Lock the repository store (.hg/store) and return a weak reference
1745 to the lock. Use this before modifying the store (e.g. committing or
1757 to the lock. Use this before modifying the store (e.g. committing or
1746 stripping). If you are opening a transaction, get a lock as well.
1758 stripping). If you are opening a transaction, get a lock as well.
1747
1759
1748 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1760 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1749 'wlock' first to avoid a dead-lock hazard.'''
1761 'wlock' first to avoid a dead-lock hazard.'''
1750 l = self._currentlock(self._lockref)
1762 l = self._currentlock(self._lockref)
1751 if l is not None:
1763 if l is not None:
1752 l.lock()
1764 l.lock()
1753 return l
1765 return l
1754
1766
1755 l = self._lock(self.svfs, "lock", wait, None,
1767 l = self._lock(self.svfs, "lock", wait, None,
1756 self.invalidate, _('repository %s') % self.origroot)
1768 self.invalidate, _('repository %s') % self.origroot)
1757 self._lockref = weakref.ref(l)
1769 self._lockref = weakref.ref(l)
1758 return l
1770 return l
1759
1771
1760 def _wlockchecktransaction(self):
1772 def _wlockchecktransaction(self):
1761 if self.currenttransaction() is not None:
1773 if self.currenttransaction() is not None:
1762 raise error.LockInheritanceContractViolation(
1774 raise error.LockInheritanceContractViolation(
1763 'wlock cannot be inherited in the middle of a transaction')
1775 'wlock cannot be inherited in the middle of a transaction')
1764
1776
1765 def wlock(self, wait=True):
1777 def wlock(self, wait=True):
1766 '''Lock the non-store parts of the repository (everything under
1778 '''Lock the non-store parts of the repository (everything under
1767 .hg except .hg/store) and return a weak reference to the lock.
1779 .hg except .hg/store) and return a weak reference to the lock.
1768
1780
1769 Use this before modifying files in .hg.
1781 Use this before modifying files in .hg.
1770
1782
1771 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1783 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1772 'wlock' first to avoid a dead-lock hazard.'''
1784 'wlock' first to avoid a dead-lock hazard.'''
1773 l = self._wlockref and self._wlockref()
1785 l = self._wlockref and self._wlockref()
1774 if l is not None and l.held:
1786 if l is not None and l.held:
1775 l.lock()
1787 l.lock()
1776 return l
1788 return l
1777
1789
1778 # We do not need to check for non-waiting lock acquisition. Such
1790 # We do not need to check for non-waiting lock acquisition. Such
1779 # acquisition would not cause a dead-lock as it would just fail.
1791 # acquisition would not cause a dead-lock as it would just fail.
1780 if wait and (self.ui.configbool('devel', 'all-warnings')
1792 if wait and (self.ui.configbool('devel', 'all-warnings')
1781 or self.ui.configbool('devel', 'check-locks')):
1793 or self.ui.configbool('devel', 'check-locks')):
1782 if self._currentlock(self._lockref) is not None:
1794 if self._currentlock(self._lockref) is not None:
1783 self.ui.develwarn('"wlock" acquired after "lock"')
1795 self.ui.develwarn('"wlock" acquired after "lock"')
1784
1796
1785 def unlock():
1797 def unlock():
1786 if self.dirstate.pendingparentchange():
1798 if self.dirstate.pendingparentchange():
1787 self.dirstate.invalidate()
1799 self.dirstate.invalidate()
1788 else:
1800 else:
1789 self.dirstate.write(None)
1801 self.dirstate.write(None)
1790
1802
1791 self._filecache['dirstate'].refresh()
1803 self._filecache['dirstate'].refresh()
1792
1804
1793 l = self._lock(self.vfs, "wlock", wait, unlock,
1805 l = self._lock(self.vfs, "wlock", wait, unlock,
1794 self.invalidatedirstate, _('working directory of %s') %
1806 self.invalidatedirstate, _('working directory of %s') %
1795 self.origroot,
1807 self.origroot,
1796 inheritchecker=self._wlockchecktransaction,
1808 inheritchecker=self._wlockchecktransaction,
1797 parentenvvar='HG_WLOCK_LOCKER')
1809 parentenvvar='HG_WLOCK_LOCKER')
1798 self._wlockref = weakref.ref(l)
1810 self._wlockref = weakref.ref(l)
1799 return l
1811 return l
1800
1812
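# The docstrings above require taking 'wlock' before 'lock' when both are
# needed. A hedged usage sketch of that ordering follows; `repo` stands for
# any localrepository instance and `work` for caller-supplied code, both
# assumptions of this illustration rather than names defined in this module.
def _with_both_locks(repo, work):
    wlock = repo.wlock()          # working-copy lock first...
    try:
        lock = repo.lock()        # ...then the store lock, avoiding deadlock
        try:
            return work()
        finally:
            lock.release()
    finally:
        wlock.release()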
1801 def _currentlock(self, lockref):
1813 def _currentlock(self, lockref):
1802 """Returns the lock if it's held, or None if it's not."""
1814 """Returns the lock if it's held, or None if it's not."""
1803 if lockref is None:
1815 if lockref is None:
1804 return None
1816 return None
1805 l = lockref()
1817 l = lockref()
1806 if l is None or not l.held:
1818 if l is None or not l.held:
1807 return None
1819 return None
1808 return l
1820 return l
1809
1821
1810 def currentwlock(self):
1822 def currentwlock(self):
1811 """Returns the wlock if it's held, or None if it's not."""
1823 """Returns the wlock if it's held, or None if it's not."""
1812 return self._currentlock(self._wlockref)
1824 return self._currentlock(self._wlockref)
1813
1825
1814 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1826 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1815 """
1827 """
1816 commit an individual file as part of a larger transaction
1828 commit an individual file as part of a larger transaction
1817 """
1829 """
1818
1830
1819 fname = fctx.path()
1831 fname = fctx.path()
1820 fparent1 = manifest1.get(fname, nullid)
1832 fparent1 = manifest1.get(fname, nullid)
1821 fparent2 = manifest2.get(fname, nullid)
1833 fparent2 = manifest2.get(fname, nullid)
1822 if isinstance(fctx, context.filectx):
1834 if isinstance(fctx, context.filectx):
1823 node = fctx.filenode()
1835 node = fctx.filenode()
1824 if node in [fparent1, fparent2]:
1836 if node in [fparent1, fparent2]:
1825 self.ui.debug('reusing %s filelog entry\n' % fname)
1837 self.ui.debug('reusing %s filelog entry\n' % fname)
1826 if manifest1.flags(fname) != fctx.flags():
1838 if manifest1.flags(fname) != fctx.flags():
1827 changelist.append(fname)
1839 changelist.append(fname)
1828 return node
1840 return node
1829
1841
1830 flog = self.file(fname)
1842 flog = self.file(fname)
1831 meta = {}
1843 meta = {}
1832 copy = fctx.renamed()
1844 copy = fctx.renamed()
1833 if copy and copy[0] != fname:
1845 if copy and copy[0] != fname:
1834 # Mark the new revision of this file as a copy of another
1846 # Mark the new revision of this file as a copy of another
1835 # file. This copy data will effectively act as a parent
1847 # file. This copy data will effectively act as a parent
1836 # of this new revision. If this is a merge, the first
1848 # of this new revision. If this is a merge, the first
1837 # parent will be the nullid (meaning "look up the copy data")
1849 # parent will be the nullid (meaning "look up the copy data")
1838 # and the second one will be the other parent. For example:
1850 # and the second one will be the other parent. For example:
1839 #
1851 #
1840 # 0 --- 1 --- 3 rev1 changes file foo
1852 # 0 --- 1 --- 3 rev1 changes file foo
1841 # \ / rev2 renames foo to bar and changes it
1853 # \ / rev2 renames foo to bar and changes it
1842 # \- 2 -/ rev3 should have bar with all changes and
1854 # \- 2 -/ rev3 should have bar with all changes and
1843 # should record that bar descends from
1855 # should record that bar descends from
1844 # bar in rev2 and foo in rev1
1856 # bar in rev2 and foo in rev1
1845 #
1857 #
1846 # this allows this merge to succeed:
1858 # this allows this merge to succeed:
1847 #
1859 #
1848 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1860 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1849 # \ / merging rev3 and rev4 should use bar@rev2
1861 # \ / merging rev3 and rev4 should use bar@rev2
1850 # \- 2 --- 4 as the merge base
1862 # \- 2 --- 4 as the merge base
1851 #
1863 #
1852
1864
1853 cfname = copy[0]
1865 cfname = copy[0]
1854 crev = manifest1.get(cfname)
1866 crev = manifest1.get(cfname)
1855 newfparent = fparent2
1867 newfparent = fparent2
1856
1868
1857 if manifest2: # branch merge
1869 if manifest2: # branch merge
1858 if fparent2 == nullid or crev is None: # copied on remote side
1870 if fparent2 == nullid or crev is None: # copied on remote side
1859 if cfname in manifest2:
1871 if cfname in manifest2:
1860 crev = manifest2[cfname]
1872 crev = manifest2[cfname]
1861 newfparent = fparent1
1873 newfparent = fparent1
1862
1874
1863 # Here, we used to search backwards through history to try to find
1875 # Here, we used to search backwards through history to try to find
1864 # where the file copy came from if the source of a copy was not in
1876 # where the file copy came from if the source of a copy was not in
1865 # the parent directory. However, this doesn't actually make sense to
1877 # the parent directory. However, this doesn't actually make sense to
1866 # do (what does a copy from something not in your working copy even
1878 # do (what does a copy from something not in your working copy even
1867 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1879 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1868 # the user that copy information was dropped, so if they didn't
1880 # the user that copy information was dropped, so if they didn't
1869 # expect this outcome it can be fixed, but this is the correct
1881 # expect this outcome it can be fixed, but this is the correct
1870 # behavior in this circumstance.
1882 # behavior in this circumstance.
1871
1883
1872 if crev:
1884 if crev:
1873 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1885 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1874 meta["copy"] = cfname
1886 meta["copy"] = cfname
1875 meta["copyrev"] = hex(crev)
1887 meta["copyrev"] = hex(crev)
1876 fparent1, fparent2 = nullid, newfparent
1888 fparent1, fparent2 = nullid, newfparent
1877 else:
1889 else:
1878 self.ui.warn(_("warning: can't find ancestor for '%s' "
1890 self.ui.warn(_("warning: can't find ancestor for '%s' "
1879 "copied from '%s'!\n") % (fname, cfname))
1891 "copied from '%s'!\n") % (fname, cfname))
1880
1892
1881 elif fparent1 == nullid:
1893 elif fparent1 == nullid:
1882 fparent1, fparent2 = fparent2, nullid
1894 fparent1, fparent2 = fparent2, nullid
1883 elif fparent2 != nullid:
1895 elif fparent2 != nullid:
1884 # is one parent an ancestor of the other?
1896 # is one parent an ancestor of the other?
1885 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1897 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1886 if fparent1 in fparentancestors:
1898 if fparent1 in fparentancestors:
1887 fparent1, fparent2 = fparent2, nullid
1899 fparent1, fparent2 = fparent2, nullid
1888 elif fparent2 in fparentancestors:
1900 elif fparent2 in fparentancestors:
1889 fparent2 = nullid
1901 fparent2 = nullid
1890
1902
1891 # is the file changed?
1903 # is the file changed?
1892 text = fctx.data()
1904 text = fctx.data()
1893 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1905 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1894 changelist.append(fname)
1906 changelist.append(fname)
1895 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1907 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1896 # are just the flags changed during merge?
1908 # are just the flags changed during merge?
1897 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1909 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1898 changelist.append(fname)
1910 changelist.append(fname)
1899
1911
1900 return fparent1
1912 return fparent1
1901
1913
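# A simplified standalone sketch of how the copy/rename case above is encoded
# in a filelog revision: the copy source and its filenode go into the revision
# metadata, and the first parent becomes nullid so readers know to consult
# that metadata. The names below are illustrative, not Mercurial APIs.
_EXAMPLE_NULLID = b'\0' * 20

def _record_copy(copy_source, copy_source_filenode_hex, other_parent):
    meta = {
        b'copy': copy_source,                   # path the content came from
        b'copyrev': copy_source_filenode_hex,   # hex filenode of that source
    }
    # first parent means "look up the copy data"; the second keeps the other
    # merge parent, if any
    fparent1, fparent2 = _EXAMPLE_NULLID, other_parent
    return meta, fparent1, fparent2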
1902 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1914 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1903 """check for commit arguments that aren't committable"""
1915 """check for commit arguments that aren't committable"""
1904 if match.isexact() or match.prefix():
1916 if match.isexact() or match.prefix():
1905 matched = set(status.modified + status.added + status.removed)
1917 matched = set(status.modified + status.added + status.removed)
1906
1918
1907 for f in match.files():
1919 for f in match.files():
1908 f = self.dirstate.normalize(f)
1920 f = self.dirstate.normalize(f)
1909 if f == '.' or f in matched or f in wctx.substate:
1921 if f == '.' or f in matched or f in wctx.substate:
1910 continue
1922 continue
1911 if f in status.deleted:
1923 if f in status.deleted:
1912 fail(f, _('file not found!'))
1924 fail(f, _('file not found!'))
1913 if f in vdirs: # visited directory
1925 if f in vdirs: # visited directory
1914 d = f + '/'
1926 d = f + '/'
1915 for mf in matched:
1927 for mf in matched:
1916 if mf.startswith(d):
1928 if mf.startswith(d):
1917 break
1929 break
1918 else:
1930 else:
1919 fail(f, _("no match under directory!"))
1931 fail(f, _("no match under directory!"))
1920 elif f not in self.dirstate:
1932 elif f not in self.dirstate:
1921 fail(f, _("file not tracked!"))
1933 fail(f, _("file not tracked!"))
1922
1934
1923 @unfilteredmethod
1935 @unfilteredmethod
1924 def commit(self, text="", user=None, date=None, match=None, force=False,
1936 def commit(self, text="", user=None, date=None, match=None, force=False,
1925 editor=False, extra=None):
1937 editor=False, extra=None):
1926 """Add a new revision to current repository.
1938 """Add a new revision to current repository.
1927
1939
1928 Revision information is gathered from the working directory,
1940 Revision information is gathered from the working directory,
1929 match can be used to filter the committed files. If editor is
1941 match can be used to filter the committed files. If editor is
1930 supplied, it is called to get a commit message.
1942 supplied, it is called to get a commit message.
1931 """
1943 """
1932 if extra is None:
1944 if extra is None:
1933 extra = {}
1945 extra = {}
1934
1946
1935 def fail(f, msg):
1947 def fail(f, msg):
1936 raise error.Abort('%s: %s' % (f, msg))
1948 raise error.Abort('%s: %s' % (f, msg))
1937
1949
1938 if not match:
1950 if not match:
1939 match = matchmod.always(self.root, '')
1951 match = matchmod.always(self.root, '')
1940
1952
1941 if not force:
1953 if not force:
1942 vdirs = []
1954 vdirs = []
1943 match.explicitdir = vdirs.append
1955 match.explicitdir = vdirs.append
1944 match.bad = fail
1956 match.bad = fail
1945
1957
1946 wlock = lock = tr = None
1958 wlock = lock = tr = None
1947 try:
1959 try:
1948 wlock = self.wlock()
1960 wlock = self.wlock()
1949 lock = self.lock() # for recent changelog (see issue4368)
1961 lock = self.lock() # for recent changelog (see issue4368)
1950
1962
1951 wctx = self[None]
1963 wctx = self[None]
1952 merge = len(wctx.parents()) > 1
1964 merge = len(wctx.parents()) > 1
1953
1965
1954 if not force and merge and not match.always():
1966 if not force and merge and not match.always():
1955 raise error.Abort(_('cannot partially commit a merge '
1967 raise error.Abort(_('cannot partially commit a merge '
1956 '(do not specify files or patterns)'))
1968 '(do not specify files or patterns)'))
1957
1969
1958 status = self.status(match=match, clean=force)
1970 status = self.status(match=match, clean=force)
1959 if force:
1971 if force:
1960 status.modified.extend(status.clean) # mq may commit clean files
1972 status.modified.extend(status.clean) # mq may commit clean files
1961
1973
1962 # check subrepos
1974 # check subrepos
1963 subs, commitsubs, newstate = subrepoutil.precommit(
1975 subs, commitsubs, newstate = subrepoutil.precommit(
1964 self.ui, wctx, status, match, force=force)
1976 self.ui, wctx, status, match, force=force)
1965
1977
1966 # make sure all explicit patterns are matched
1978 # make sure all explicit patterns are matched
1967 if not force:
1979 if not force:
1968 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1980 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1969
1981
1970 cctx = context.workingcommitctx(self, status,
1982 cctx = context.workingcommitctx(self, status,
1971 text, user, date, extra)
1983 text, user, date, extra)
1972
1984
1973 # internal config: ui.allowemptycommit
1985 # internal config: ui.allowemptycommit
1974 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1986 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1975 or extra.get('close') or merge or cctx.files()
1987 or extra.get('close') or merge or cctx.files()
1976 or self.ui.configbool('ui', 'allowemptycommit'))
1988 or self.ui.configbool('ui', 'allowemptycommit'))
1977 if not allowemptycommit:
1989 if not allowemptycommit:
1978 return None
1990 return None
1979
1991
1980 if merge and cctx.deleted():
1992 if merge and cctx.deleted():
1981 raise error.Abort(_("cannot commit merge with missing files"))
1993 raise error.Abort(_("cannot commit merge with missing files"))
1982
1994
1983 ms = mergemod.mergestate.read(self)
1995 ms = mergemod.mergestate.read(self)
1984 mergeutil.checkunresolved(ms)
1996 mergeutil.checkunresolved(ms)
1985
1997
1986 if editor:
1998 if editor:
1987 cctx._text = editor(self, cctx, subs)
1999 cctx._text = editor(self, cctx, subs)
1988 edited = (text != cctx._text)
2000 edited = (text != cctx._text)
1989
2001
1990 # Save commit message in case this transaction gets rolled back
2002 # Save commit message in case this transaction gets rolled back
1991 # (e.g. by a pretxncommit hook). Leave the content alone on
2003 # (e.g. by a pretxncommit hook). Leave the content alone on
1992 # the assumption that the user will use the same editor again.
2004 # the assumption that the user will use the same editor again.
1993 msgfn = self.savecommitmessage(cctx._text)
2005 msgfn = self.savecommitmessage(cctx._text)
1994
2006
1995 # commit subs and write new state
2007 # commit subs and write new state
1996 if subs:
2008 if subs:
1997 for s in sorted(commitsubs):
2009 for s in sorted(commitsubs):
1998 sub = wctx.sub(s)
2010 sub = wctx.sub(s)
1999 self.ui.status(_('committing subrepository %s\n') %
2011 self.ui.status(_('committing subrepository %s\n') %
2000 subrepoutil.subrelpath(sub))
2012 subrepoutil.subrelpath(sub))
2001 sr = sub.commit(cctx._text, user, date)
2013 sr = sub.commit(cctx._text, user, date)
2002 newstate[s] = (newstate[s][0], sr)
2014 newstate[s] = (newstate[s][0], sr)
2003 subrepoutil.writestate(self, newstate)
2015 subrepoutil.writestate(self, newstate)
2004
2016
2005 p1, p2 = self.dirstate.parents()
2017 p1, p2 = self.dirstate.parents()
2006 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2018 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2007 try:
2019 try:
2008 self.hook("precommit", throw=True, parent1=hookp1,
2020 self.hook("precommit", throw=True, parent1=hookp1,
2009 parent2=hookp2)
2021 parent2=hookp2)
2010 tr = self.transaction('commit')
2022 tr = self.transaction('commit')
2011 ret = self.commitctx(cctx, True)
2023 ret = self.commitctx(cctx, True)
2012 except: # re-raises
2024 except: # re-raises
2013 if edited:
2025 if edited:
2014 self.ui.write(
2026 self.ui.write(
2015 _('note: commit message saved in %s\n') % msgfn)
2027 _('note: commit message saved in %s\n') % msgfn)
2016 raise
2028 raise
2017 # update bookmarks, dirstate and mergestate
2029 # update bookmarks, dirstate and mergestate
2018 bookmarks.update(self, [p1, p2], ret)
2030 bookmarks.update(self, [p1, p2], ret)
2019 cctx.markcommitted(ret)
2031 cctx.markcommitted(ret)
2020 ms.reset()
2032 ms.reset()
2021 tr.close()
2033 tr.close()
2022
2034
2023 finally:
2035 finally:
2024 lockmod.release(tr, lock, wlock)
2036 lockmod.release(tr, lock, wlock)
2025
2037
2026 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2038 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2027 # hack for commands that use a temporary commit (e.g. histedit):
2039 # hack for commands that use a temporary commit (e.g. histedit):
2028 # the temporary commit may already have been stripped before the hook runs
2040 # the temporary commit may already have been stripped before the hook runs
2029 if self.changelog.hasnode(ret):
2041 if self.changelog.hasnode(ret):
2030 self.hook("commit", node=node, parent1=parent1,
2042 self.hook("commit", node=node, parent1=parent1,
2031 parent2=parent2)
2043 parent2=parent2)
2032 self._afterlock(commithook)
2044 self._afterlock(commithook)
2033 return ret
2045 return ret
2034
2046
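# A hedged usage sketch of the commit() API above, e.g. from a script run
# inside a working copy with Mercurial importable. The path, message and user
# values are placeholders; error handling is omitted.
def _example_commit(path=b'.'):
    from mercurial import hg, ui as uimod
    ui = uimod.ui.load()
    repo = hg.repository(ui, path)
    node = repo.commit(text=b'example commit message',
                       user=b'Example User <user@example.com>')
    if node is None:
        ui.status(b'nothing changed\n')
    return node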
2035 @unfilteredmethod
2047 @unfilteredmethod
2036 def commitctx(self, ctx, error=False):
2048 def commitctx(self, ctx, error=False):
2037 """Add a new revision to current repository.
2049 """Add a new revision to current repository.
2038 Revision information is passed via the context argument.
2050 Revision information is passed via the context argument.
2039
2051
2040 ctx.files() should list all files involved in this commit, i.e.
2052 ctx.files() should list all files involved in this commit, i.e.
2041 modified/added/removed files. On merge, it may be wider than the
2053 modified/added/removed files. On merge, it may be wider than the
2042 ctx.files() to be committed, since any file nodes derived directly
2054 ctx.files() to be committed, since any file nodes derived directly
2043 from p1 or p2 are excluded from the committed ctx.files().
2055 from p1 or p2 are excluded from the committed ctx.files().
2044 """
2056 """
2045
2057
2046 tr = None
2058 tr = None
2047 p1, p2 = ctx.p1(), ctx.p2()
2059 p1, p2 = ctx.p1(), ctx.p2()
2048 user = ctx.user()
2060 user = ctx.user()
2049
2061
2050 lock = self.lock()
2062 lock = self.lock()
2051 try:
2063 try:
2052 tr = self.transaction("commit")
2064 tr = self.transaction("commit")
2053 trp = weakref.proxy(tr)
2065 trp = weakref.proxy(tr)
2054
2066
2055 if ctx.manifestnode():
2067 if ctx.manifestnode():
2056 # reuse an existing manifest revision
2068 # reuse an existing manifest revision
2057 self.ui.debug('reusing known manifest\n')
2069 self.ui.debug('reusing known manifest\n')
2058 mn = ctx.manifestnode()
2070 mn = ctx.manifestnode()
2059 files = ctx.files()
2071 files = ctx.files()
2060 elif ctx.files():
2072 elif ctx.files():
2061 m1ctx = p1.manifestctx()
2073 m1ctx = p1.manifestctx()
2062 m2ctx = p2.manifestctx()
2074 m2ctx = p2.manifestctx()
2063 mctx = m1ctx.copy()
2075 mctx = m1ctx.copy()
2064
2076
2065 m = mctx.read()
2077 m = mctx.read()
2066 m1 = m1ctx.read()
2078 m1 = m1ctx.read()
2067 m2 = m2ctx.read()
2079 m2 = m2ctx.read()
2068
2080
2069 # check in files
2081 # check in files
2070 added = []
2082 added = []
2071 changed = []
2083 changed = []
2072 removed = list(ctx.removed())
2084 removed = list(ctx.removed())
2073 linkrev = len(self)
2085 linkrev = len(self)
2074 self.ui.note(_("committing files:\n"))
2086 self.ui.note(_("committing files:\n"))
2075 for f in sorted(ctx.modified() + ctx.added()):
2087 for f in sorted(ctx.modified() + ctx.added()):
2076 self.ui.note(f + "\n")
2088 self.ui.note(f + "\n")
2077 try:
2089 try:
2078 fctx = ctx[f]
2090 fctx = ctx[f]
2079 if fctx is None:
2091 if fctx is None:
2080 removed.append(f)
2092 removed.append(f)
2081 else:
2093 else:
2082 added.append(f)
2094 added.append(f)
2083 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2095 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2084 trp, changed)
2096 trp, changed)
2085 m.setflag(f, fctx.flags())
2097 m.setflag(f, fctx.flags())
2086 except OSError as inst:
2098 except OSError as inst:
2087 self.ui.warn(_("trouble committing %s!\n") % f)
2099 self.ui.warn(_("trouble committing %s!\n") % f)
2088 raise
2100 raise
2089 except IOError as inst:
2101 except IOError as inst:
2090 errcode = getattr(inst, 'errno', errno.ENOENT)
2102 errcode = getattr(inst, 'errno', errno.ENOENT)
2091 if error or errcode and errcode != errno.ENOENT:
2103 if error or errcode and errcode != errno.ENOENT:
2092 self.ui.warn(_("trouble committing %s!\n") % f)
2104 self.ui.warn(_("trouble committing %s!\n") % f)
2093 raise
2105 raise
2094
2106
2095 # update manifest
2107 # update manifest
2096 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2108 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2097 drop = [f for f in removed if f in m]
2109 drop = [f for f in removed if f in m]
2098 for f in drop:
2110 for f in drop:
2099 del m[f]
2111 del m[f]
2100 files = changed + removed
2112 files = changed + removed
2101 md = None
2113 md = None
2102 if not files:
2114 if not files:
2103 # if no "files" actually changed in terms of the changelog,
2115 # if no "files" actually changed in terms of the changelog,
2104 # try hard to detect unmodified manifest entries so that the
2116 # try hard to detect unmodified manifest entries so that the
2105 # exact same commit can be reproduced later on by 'hg convert'.
2117 # exact same commit can be reproduced later on by 'hg convert'.
2106 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2118 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2107 if not files and md:
2119 if not files and md:
2108 self.ui.debug('not reusing manifest (no file change in '
2120 self.ui.debug('not reusing manifest (no file change in '
2109 'changelog, but manifest differs)\n')
2121 'changelog, but manifest differs)\n')
2110 if files or md:
2122 if files or md:
2111 self.ui.note(_("committing manifest\n"))
2123 self.ui.note(_("committing manifest\n"))
2112 # we're using narrowmatch here since it's already applied at
2124 # we're using narrowmatch here since it's already applied at
2113 # other stages (such as dirstate.walk), so we're already
2125 # other stages (such as dirstate.walk), so we're already
2114 # ignoring things outside of narrowspec in most cases. The
2126 # ignoring things outside of narrowspec in most cases. The
2115 # one case where we might have files outside the narrowspec
2127 # one case where we might have files outside the narrowspec
2116 # at this point is merges, and we already error out in the
2128 # at this point is merges, and we already error out in the
2117 # case where the merge has files outside of the narrowspec,
2129 # case where the merge has files outside of the narrowspec,
2118 # so this is safe.
2130 # so this is safe.
2119 mn = mctx.write(trp, linkrev,
2131 mn = mctx.write(trp, linkrev,
2120 p1.manifestnode(), p2.manifestnode(),
2132 p1.manifestnode(), p2.manifestnode(),
2121 added, drop, match=self.narrowmatch())
2133 added, drop, match=self.narrowmatch())
2122 else:
2134 else:
2123 self.ui.debug('reusing manifest from p1 (listed files '
2135 self.ui.debug('reusing manifest from p1 (listed files '
2124 'actually unchanged)\n')
2136 'actually unchanged)\n')
2125 mn = p1.manifestnode()
2137 mn = p1.manifestnode()
2126 else:
2138 else:
2127 self.ui.debug('reusing manifest from p1 (no file change)\n')
2139 self.ui.debug('reusing manifest from p1 (no file change)\n')
2128 mn = p1.manifestnode()
2140 mn = p1.manifestnode()
2129 files = []
2141 files = []
2130
2142
2131 # update changelog
2143 # update changelog
2132 self.ui.note(_("committing changelog\n"))
2144 self.ui.note(_("committing changelog\n"))
2133 self.changelog.delayupdate(tr)
2145 self.changelog.delayupdate(tr)
2134 n = self.changelog.add(mn, files, ctx.description(),
2146 n = self.changelog.add(mn, files, ctx.description(),
2135 trp, p1.node(), p2.node(),
2147 trp, p1.node(), p2.node(),
2136 user, ctx.date(), ctx.extra().copy())
2148 user, ctx.date(), ctx.extra().copy())
2137 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2149 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2138 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2150 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2139 parent2=xp2)
2151 parent2=xp2)
2140 # set the new commit in its proper phase
2152 # set the new commit in its proper phase
2141 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2153 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2142 if targetphase:
2154 if targetphase:
2143 # retracting the phase boundary does not alter parent changesets.
2155 # retracting the phase boundary does not alter parent changesets.
2144 # if a parent has a higher phase, the resulting phase will
2156 # if a parent has a higher phase, the resulting phase will
2145 # be compliant anyway
2157 # be compliant anyway
2146 #
2158 #
2147 # if the minimal phase is 0 we don't need to retract anything
2159 # if the minimal phase is 0 we don't need to retract anything
2148 phases.registernew(self, tr, targetphase, [n])
2160 phases.registernew(self, tr, targetphase, [n])
2149 tr.close()
2161 tr.close()
2150 return n
2162 return n
2151 finally:
2163 finally:
2152 if tr:
2164 if tr:
2153 tr.release()
2165 tr.release()
2154 lock.release()
2166 lock.release()
2155
2167
2156 @unfilteredmethod
2168 @unfilteredmethod
2157 def destroying(self):
2169 def destroying(self):
2158 '''Inform the repository that nodes are about to be destroyed.
2170 '''Inform the repository that nodes are about to be destroyed.
2159 Intended for use by strip and rollback, so there's a common
2171 Intended for use by strip and rollback, so there's a common
2160 place for anything that has to be done before destroying history.
2172 place for anything that has to be done before destroying history.
2161
2173
2162 This is mostly useful for saving state that is in memory and waiting
2174 This is mostly useful for saving state that is in memory and waiting
2163 to be flushed when the current lock is released. Because a call to
2175 to be flushed when the current lock is released. Because a call to
2164 destroyed is imminent, the repo will be invalidated causing those
2176 destroyed is imminent, the repo will be invalidated causing those
2165 changes to stay in memory (waiting for the next unlock), or vanish
2177 changes to stay in memory (waiting for the next unlock), or vanish
2166 completely.
2178 completely.
2167 '''
2179 '''
2168 # When using the same lock to commit and strip, the phasecache is left
2180 # When using the same lock to commit and strip, the phasecache is left
2169 # dirty after committing. Then when we strip, the repo is invalidated,
2181 # dirty after committing. Then when we strip, the repo is invalidated,
2170 # causing those changes to disappear.
2182 # causing those changes to disappear.
2171 if '_phasecache' in vars(self):
2183 if '_phasecache' in vars(self):
2172 self._phasecache.write()
2184 self._phasecache.write()
2173
2185
2174 @unfilteredmethod
2186 @unfilteredmethod
2175 def destroyed(self):
2187 def destroyed(self):
2176 '''Inform the repository that nodes have been destroyed.
2188 '''Inform the repository that nodes have been destroyed.
2177 Intended for use by strip and rollback, so there's a common
2189 Intended for use by strip and rollback, so there's a common
2178 place for anything that has to be done after destroying history.
2190 place for anything that has to be done after destroying history.
2179 '''
2191 '''
2180 # When one tries to:
2192 # When one tries to:
2181 # 1) destroy nodes thus calling this method (e.g. strip)
2193 # 1) destroy nodes thus calling this method (e.g. strip)
2182 # 2) use phasecache somewhere (e.g. commit)
2194 # 2) use phasecache somewhere (e.g. commit)
2183 #
2195 #
2184 # then 2) will fail because the phasecache contains nodes that were
2196 # then 2) will fail because the phasecache contains nodes that were
2185 # removed. We can either remove phasecache from the filecache,
2197 # removed. We can either remove phasecache from the filecache,
2186 # causing it to reload next time it is accessed, or simply filter
2198 # causing it to reload next time it is accessed, or simply filter
2187 # the removed nodes now and write the updated cache.
2199 # the removed nodes now and write the updated cache.
2188 self._phasecache.filterunknown(self)
2200 self._phasecache.filterunknown(self)
2189 self._phasecache.write()
2201 self._phasecache.write()
2190
2202
2191 # refresh all repository caches
2203 # refresh all repository caches
2192 self.updatecaches()
2204 self.updatecaches()
2193
2205
2194 # Ensure the persistent tag cache is updated. Doing it now
2206 # Ensure the persistent tag cache is updated. Doing it now
2195 # means that the tag cache only has to worry about destroyed
2207 # means that the tag cache only has to worry about destroyed
2196 # heads immediately after a strip/rollback. That in turn
2208 # heads immediately after a strip/rollback. That in turn
2197 # guarantees that "cachetip == currenttip" (comparing both rev
2209 # guarantees that "cachetip == currenttip" (comparing both rev
2198 # and node) always means no nodes have been added or destroyed.
2210 # and node) always means no nodes have been added or destroyed.
2199
2211
2200 # XXX this is suboptimal when qrefresh'ing: we strip the current
2212 # XXX this is suboptimal when qrefresh'ing: we strip the current
2201 # head, refresh the tag cache, then immediately add a new head.
2213 # head, refresh the tag cache, then immediately add a new head.
2202 # But I think doing it this way is necessary for the "instant
2214 # But I think doing it this way is necessary for the "instant
2203 # tag cache retrieval" case to work.
2215 # tag cache retrieval" case to work.
2204 self.invalidate()
2216 self.invalidate()
2205
2217
2206 def status(self, node1='.', node2=None, match=None,
2218 def status(self, node1='.', node2=None, match=None,
2207 ignored=False, clean=False, unknown=False,
2219 ignored=False, clean=False, unknown=False,
2208 listsubrepos=False):
2220 listsubrepos=False):
2209 '''a convenience method that calls node1.status(node2)'''
2221 '''a convenience method that calls node1.status(node2)'''
2210 return self[node1].status(node2, match, ignored, clean, unknown,
2222 return self[node1].status(node2, match, ignored, clean, unknown,
2211 listsubrepos)
2223 listsubrepos)
2212
2224
2213 def addpostdsstatus(self, ps):
2225 def addpostdsstatus(self, ps):
2214 """Add a callback to run within the wlock, at the point at which status
2226 """Add a callback to run within the wlock, at the point at which status
2215 fixups happen.
2227 fixups happen.
2216
2228
2217 On status completion, callback(wctx, status) will be called with the
2229 On status completion, callback(wctx, status) will be called with the
2218 wlock held, unless the dirstate has changed from underneath or the wlock
2230 wlock held, unless the dirstate has changed from underneath or the wlock
2219 couldn't be grabbed.
2231 couldn't be grabbed.
2220
2232
2221 Callbacks should not capture and use a cached copy of the dirstate --
2233 Callbacks should not capture and use a cached copy of the dirstate --
2222 it might change in the meanwhile. Instead, they should access the
2234 it might change in the meanwhile. Instead, they should access the
2223 dirstate via wctx.repo().dirstate.
2235 dirstate via wctx.repo().dirstate.
2224
2236
2225 This list is emptied out after each status run -- extensions should
2237 This list is emptied out after each status run -- extensions should
2226 make sure they add to this list each time dirstate.status is called.
2238 make sure they add to this list each time dirstate.status is called.
2227 Extensions should also make sure they don't call this for statuses
2239 Extensions should also make sure they don't call this for statuses
2228 that don't involve the dirstate.
2240 that don't involve the dirstate.
2229 """
2241 """
2230
2242
2231 # The list is located here for uniqueness reasons -- it is actually
2243 # The list is located here for uniqueness reasons -- it is actually
2232 # managed by the workingctx, but that isn't unique per-repo.
2244 # managed by the workingctx, but that isn't unique per-repo.
2233 self._postdsstatus.append(ps)
2245 self._postdsstatus.append(ps)
2234
2246
2235 def postdsstatus(self):
2247 def postdsstatus(self):
2236 """Used by workingctx to get the list of post-dirstate-status hooks."""
2248 """Used by workingctx to get the list of post-dirstate-status hooks."""
2237 return self._postdsstatus
2249 return self._postdsstatus
2238
2250
2239 def clearpostdsstatus(self):
2251 def clearpostdsstatus(self):
2240 """Used by workingctx to clear post-dirstate-status hooks."""
2252 """Used by workingctx to clear post-dirstate-status hooks."""
2241 del self._postdsstatus[:]
2253 del self._postdsstatus[:]
2242
2254
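# A hedged sketch of registering a post-dirstate-status callback, as described
# in addpostdsstatus() above. The callback receives the workingctx and the
# status and should reach the dirstate through wctx.repo().dirstate; `repo` is
# assumed to be a localrepository instance obtained elsewhere.
def _register_example_fixup(repo):
    def fixup(wctx, status):
        # illustrative only: report how many files were seen as modified
        wctx.repo().ui.debug(b'%d modified files\n' % len(status.modified))
    repo.addpostdsstatus(fixup)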
2243 def heads(self, start=None):
2255 def heads(self, start=None):
2244 if start is None:
2256 if start is None:
2245 cl = self.changelog
2257 cl = self.changelog
2246 headrevs = reversed(cl.headrevs())
2258 headrevs = reversed(cl.headrevs())
2247 return [cl.node(rev) for rev in headrevs]
2259 return [cl.node(rev) for rev in headrevs]
2248
2260
2249 heads = self.changelog.heads(start)
2261 heads = self.changelog.heads(start)
2250 # sort the output in rev descending order
2262 # sort the output in rev descending order
2251 return sorted(heads, key=self.changelog.rev, reverse=True)
2263 return sorted(heads, key=self.changelog.rev, reverse=True)
2252
2264
2253 def branchheads(self, branch=None, start=None, closed=False):
2265 def branchheads(self, branch=None, start=None, closed=False):
2254 '''return a (possibly filtered) list of heads for the given branch
2266 '''return a (possibly filtered) list of heads for the given branch
2255
2267
2256 Heads are returned in topological order, from newest to oldest.
2268 Heads are returned in topological order, from newest to oldest.
2257 If branch is None, use the dirstate branch.
2269 If branch is None, use the dirstate branch.
2258 If start is not None, return only heads reachable from start.
2270 If start is not None, return only heads reachable from start.
2259 If closed is True, return heads that are marked as closed as well.
2271 If closed is True, return heads that are marked as closed as well.
2260 '''
2272 '''
2261 if branch is None:
2273 if branch is None:
2262 branch = self[None].branch()
2274 branch = self[None].branch()
2263 branches = self.branchmap()
2275 branches = self.branchmap()
2264 if branch not in branches:
2276 if branch not in branches:
2265 return []
2277 return []
2266 # the cache returns heads ordered lowest to highest
2278 # the cache returns heads ordered lowest to highest
2267 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2279 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2268 if start is not None:
2280 if start is not None:
2269 # filter out the heads that cannot be reached from startrev
2281 # filter out the heads that cannot be reached from startrev
2270 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2282 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2271 bheads = [h for h in bheads if h in fbheads]
2283 bheads = [h for h in bheads if h in fbheads]
2272 return bheads
2284 return bheads
2273
2285
2274 def branches(self, nodes):
2286 def branches(self, nodes):
2275 if not nodes:
2287 if not nodes:
2276 nodes = [self.changelog.tip()]
2288 nodes = [self.changelog.tip()]
2277 b = []
2289 b = []
2278 for n in nodes:
2290 for n in nodes:
2279 t = n
2291 t = n
2280 while True:
2292 while True:
2281 p = self.changelog.parents(n)
2293 p = self.changelog.parents(n)
2282 if p[1] != nullid or p[0] == nullid:
2294 if p[1] != nullid or p[0] == nullid:
2283 b.append((t, n, p[0], p[1]))
2295 b.append((t, n, p[0], p[1]))
2284 break
2296 break
2285 n = p[0]
2297 n = p[0]
2286 return b
2298 return b
2287
2299
2288 def between(self, pairs):
2300 def between(self, pairs):
2289 r = []
2301 r = []
2290
2302
2291 for top, bottom in pairs:
2303 for top, bottom in pairs:
2292 n, l, i = top, [], 0
2304 n, l, i = top, [], 0
2293 f = 1
2305 f = 1
2294
2306
2295 while n != bottom and n != nullid:
2307 while n != bottom and n != nullid:
2296 p = self.changelog.parents(n)[0]
2308 p = self.changelog.parents(n)[0]
2297 if i == f:
2309 if i == f:
2298 l.append(n)
2310 l.append(n)
2299 f = f * 2
2311 f = f * 2
2300 n = p
2312 n = p
2301 i += 1
2313 i += 1
2302
2314
2303 r.append(l)
2315 r.append(l)
2304
2316
2305 return r
2317 return r
2306
2318
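# between() above samples first-parent ancestors at exponentially growing
# distances (1, 2, 4, 8, ...) from the top node. A standalone sketch of that
# sampling over a plain parent mapping follows; `parents` maps a node to its
# first parent (None at the root) and is an assumption of this illustration.
def _sample_between(parents, top, bottom):
    n, sampled, i, f = top, [], 0, 1
    while n != bottom and n is not None:
        p = parents.get(n)
        if i == f:
            sampled.append(n)
            f *= 2
        n = p
        i += 1
    return sampled

def _sample_between_demo():
    # linear history 9 -> 8 -> ... -> 0
    parents = dict((k, k - 1) for k in range(1, 10))
    parents[0] = None
    return _sample_between(parents, 9, 0)   # [8, 7, 5, 1]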
2307 def checkpush(self, pushop):
2319 def checkpush(self, pushop):
2308 """Extensions can override this function if additional checks have
2320 """Extensions can override this function if additional checks have
2309 to be performed before pushing, or call it if they override the push
2321 to be performed before pushing, or call it if they override the push
2310 command.
2322 command.
2311 """
2323 """
2312
2324
2313 @unfilteredpropertycache
2325 @unfilteredpropertycache
2314 def prepushoutgoinghooks(self):
2326 def prepushoutgoinghooks(self):
2315 """Return util.hooks consists of a pushop with repo, remote, outgoing
2327 """Return util.hooks consists of a pushop with repo, remote, outgoing
2316 methods, which are called before pushing changesets.
2328 methods, which are called before pushing changesets.
2317 """
2329 """
2318 return util.hooks()
2330 return util.hooks()
2319
2331
2320 def pushkey(self, namespace, key, old, new):
2332 def pushkey(self, namespace, key, old, new):
2321 try:
2333 try:
2322 tr = self.currenttransaction()
2334 tr = self.currenttransaction()
2323 hookargs = {}
2335 hookargs = {}
2324 if tr is not None:
2336 if tr is not None:
2325 hookargs.update(tr.hookargs)
2337 hookargs.update(tr.hookargs)
2326 hookargs = pycompat.strkwargs(hookargs)
2338 hookargs = pycompat.strkwargs(hookargs)
2327 hookargs[r'namespace'] = namespace
2339 hookargs[r'namespace'] = namespace
2328 hookargs[r'key'] = key
2340 hookargs[r'key'] = key
2329 hookargs[r'old'] = old
2341 hookargs[r'old'] = old
2330 hookargs[r'new'] = new
2342 hookargs[r'new'] = new
2331 self.hook('prepushkey', throw=True, **hookargs)
2343 self.hook('prepushkey', throw=True, **hookargs)
2332 except error.HookAbort as exc:
2344 except error.HookAbort as exc:
2333 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2345 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2334 if exc.hint:
2346 if exc.hint:
2335 self.ui.write_err(_("(%s)\n") % exc.hint)
2347 self.ui.write_err(_("(%s)\n") % exc.hint)
2336 return False
2348 return False
2337 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2349 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2338 ret = pushkey.push(self, namespace, key, old, new)
2350 ret = pushkey.push(self, namespace, key, old, new)
2339 def runhook():
2351 def runhook():
2340 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2352 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2341 ret=ret)
2353 ret=ret)
2342 self._afterlock(runhook)
2354 self._afterlock(runhook)
2343 return ret
2355 return ret
2344
2356
2345 def listkeys(self, namespace):
2357 def listkeys(self, namespace):
2346 self.hook('prelistkeys', throw=True, namespace=namespace)
2358 self.hook('prelistkeys', throw=True, namespace=namespace)
2347 self.ui.debug('listing keys for "%s"\n' % namespace)
2359 self.ui.debug('listing keys for "%s"\n' % namespace)
2348 values = pushkey.list(self, namespace)
2360 values = pushkey.list(self, namespace)
2349 self.hook('listkeys', namespace=namespace, values=values)
2361 self.hook('listkeys', namespace=namespace, values=values)
2350 return values
2362 return values
2351
2363
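# A hedged usage sketch of the pushkey()/listkeys() pair above using the
# 'bookmarks' namespace. It assumes a localrepository instance `repo` obtained
# elsewhere; bookmark names and nodes are hex byte strings, and the old value
# is b'' when the bookmark does not exist yet.
def _move_bookmark_via_pushkey(repo, name, oldhex, newhex):
    marks = repo.listkeys(b'bookmarks')          # {bookmark name: hex node}
    if marks.get(name, b'') != oldhex:
        return False                             # raced with another writer
    # returns a truthy value if the update was accepted
    return repo.pushkey(b'bookmarks', name, oldhex, newhex)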
2352 def debugwireargs(self, one, two, three=None, four=None, five=None):
2364 def debugwireargs(self, one, two, three=None, four=None, five=None):
2353 '''used to test argument passing over the wire'''
2365 '''used to test argument passing over the wire'''
2354 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2366 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2355 pycompat.bytestr(four),
2367 pycompat.bytestr(four),
2356 pycompat.bytestr(five))
2368 pycompat.bytestr(five))
2357
2369
2358 def savecommitmessage(self, text):
2370 def savecommitmessage(self, text):
2359 fp = self.vfs('last-message.txt', 'wb')
2371 fp = self.vfs('last-message.txt', 'wb')
2360 try:
2372 try:
2361 fp.write(text)
2373 fp.write(text)
2362 finally:
2374 finally:
2363 fp.close()
2375 fp.close()
2364 return self.pathto(fp.name[len(self.root) + 1:])
2376 return self.pathto(fp.name[len(self.root) + 1:])
2365
2377
2366 # used to avoid circular references so destructors work
2378 # used to avoid circular references so destructors work
2367 def aftertrans(files):
2379 def aftertrans(files):
2368 renamefiles = [tuple(t) for t in files]
2380 renamefiles = [tuple(t) for t in files]
2369 def a():
2381 def a():
2370 for vfs, src, dest in renamefiles:
2382 for vfs, src, dest in renamefiles:
2371 # if src and dest refer to the same file, vfs.rename is a no-op,
2383 # if src and dest refer to the same file, vfs.rename is a no-op,
2372 # leaving both src and dest on disk. delete dest to make sure
2384 # leaving both src and dest on disk. delete dest to make sure
2373 # the rename couldn't be such a no-op.
2385 # the rename couldn't be such a no-op.
2374 vfs.tryunlink(dest)
2386 vfs.tryunlink(dest)
2375 try:
2387 try:
2376 vfs.rename(src, dest)
2388 vfs.rename(src, dest)
2377 except OSError: # journal file does not yet exist
2389 except OSError: # journal file does not yet exist
2378 pass
2390 pass
2379 return a
2391 return a
2380
2392
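# A standalone, os-level sketch (not the vfs API) of the pattern aftertrans()
# uses above: when src and dest may name the same file, a bare rename can be a
# no-op that leaves both paths in place, so dest is unlinked first and a
# missing src (e.g. a journal that was never written) is tolerated.
def _rotate_file(src, dest):
    import errno
    import os
    try:
        os.unlink(dest)              # make a no-op rename impossible
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    try:
        os.rename(src, dest)
    except OSError:                  # src does not exist yet
        pass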
2381 def undoname(fn):
2393 def undoname(fn):
2382 base, name = os.path.split(fn)
2394 base, name = os.path.split(fn)
2383 assert name.startswith('journal')
2395 assert name.startswith('journal')
2384 return os.path.join(base, name.replace('journal', 'undo', 1))
2396 return os.path.join(base, name.replace('journal', 'undo', 1))
2385
2397
2386 def instance(ui, path, create, intents=None, createopts=None):
2398 def instance(ui, path, create, intents=None, createopts=None):
2387 localpath = util.urllocalpath(path)
2399 localpath = util.urllocalpath(path)
2388 if create:
2400 if create:
2389 createrepository(ui, localpath, createopts=createopts)
2401 createrepository(ui, localpath, createopts=createopts)
2390
2402
2391 return localrepository(ui, localpath, intents=intents)
2403 return makelocalrepository(ui, localpath, intents=intents)
2392
2404
2393 def islocal(path):
2405 def islocal(path):
2394 return True
2406 return True
2395
2407
2396 def newreporequirements(ui, createopts=None):
2408 def newreporequirements(ui, createopts=None):
2397 """Determine the set of requirements for a new local repository.
2409 """Determine the set of requirements for a new local repository.
2398
2410
2399 Extensions can wrap this function to specify custom requirements for
2411 Extensions can wrap this function to specify custom requirements for
2400 new repositories.
2412 new repositories.
2401 """
2413 """
2402 createopts = createopts or {}
2414 createopts = createopts or {}
2403
2415
2404 requirements = {'revlogv1'}
2416 requirements = {'revlogv1'}
2405 if ui.configbool('format', 'usestore'):
2417 if ui.configbool('format', 'usestore'):
2406 requirements.add('store')
2418 requirements.add('store')
2407 if ui.configbool('format', 'usefncache'):
2419 if ui.configbool('format', 'usefncache'):
2408 requirements.add('fncache')
2420 requirements.add('fncache')
2409 if ui.configbool('format', 'dotencode'):
2421 if ui.configbool('format', 'dotencode'):
2410 requirements.add('dotencode')
2422 requirements.add('dotencode')
2411
2423
2412 compengine = ui.config('experimental', 'format.compression')
2424 compengine = ui.config('experimental', 'format.compression')
2413 if compengine not in util.compengines:
2425 if compengine not in util.compengines:
2414 raise error.Abort(_('compression engine %s defined by '
2426 raise error.Abort(_('compression engine %s defined by '
2415 'experimental.format.compression not available') %
2427 'experimental.format.compression not available') %
2416 compengine,
2428 compengine,
2417 hint=_('run "hg debuginstall" to list available '
2429 hint=_('run "hg debuginstall" to list available '
2418 'compression engines'))
2430 'compression engines'))
2419
2431
2420 # zlib is the historical default and doesn't need an explicit requirement.
2432 # zlib is the historical default and doesn't need an explicit requirement.
2421 if compengine != 'zlib':
2433 if compengine != 'zlib':
2422 requirements.add('exp-compression-%s' % compengine)
2434 requirements.add('exp-compression-%s' % compengine)
2423
2435
2424 if scmutil.gdinitconfig(ui):
2436 if scmutil.gdinitconfig(ui):
2425 requirements.add('generaldelta')
2437 requirements.add('generaldelta')
2426 if ui.configbool('experimental', 'treemanifest'):
2438 if ui.configbool('experimental', 'treemanifest'):
2427 requirements.add('treemanifest')
2439 requirements.add('treemanifest')
2428 # experimental config: format.sparse-revlog
2440 # experimental config: format.sparse-revlog
2429 if ui.configbool('format', 'sparse-revlog'):
2441 if ui.configbool('format', 'sparse-revlog'):
2430 requirements.add(SPARSEREVLOG_REQUIREMENT)
2442 requirements.add(SPARSEREVLOG_REQUIREMENT)
2431
2443
2432 revlogv2 = ui.config('experimental', 'revlogv2')
2444 revlogv2 = ui.config('experimental', 'revlogv2')
2433 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2445 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2434 requirements.remove('revlogv1')
2446 requirements.remove('revlogv1')
2435 # generaldelta is implied by revlogv2.
2447 # generaldelta is implied by revlogv2.
2436 requirements.discard('generaldelta')
2448 requirements.discard('generaldelta')
2437 requirements.add(REVLOGV2_REQUIREMENT)
2449 requirements.add(REVLOGV2_REQUIREMENT)
2438 # experimental config: format.internal-phase
2450 # experimental config: format.internal-phase
2439 if ui.configbool('format', 'internal-phase'):
2451 if ui.configbool('format', 'internal-phase'):
2440 requirements.add('internal-phase')
2452 requirements.add('internal-phase')
2441
2453
2442 if createopts.get('narrowfiles'):
2454 if createopts.get('narrowfiles'):
2443 requirements.add(repository.NARROW_REQUIREMENT)
2455 requirements.add(repository.NARROW_REQUIREMENT)
2444
2456
2445 return requirements
2457 return requirements
2446
2458
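# The docstring above says extensions can wrap newreporequirements(). A hedged
# sketch of such a wrapper follows, shown as a comment because it belongs in a
# separate extension module rather than in localrepo.py itself; the
# requirement name 'exp-examplefeature' is purely hypothetical.
#
# from mercurial import extensions, localrepo
#
# def _wrappedrequirements(orig, ui, createopts=None):
#     requirements = orig(ui, createopts=createopts)
#     requirements.add(b'exp-examplefeature')
#     return requirements
#
# def extsetup(ui):
#     extensions.wrapfunction(localrepo, 'newreporequirements',
#                             _wrappedrequirements)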
2447 def filterknowncreateopts(ui, createopts):
2459 def filterknowncreateopts(ui, createopts):
2448 """Filters a dict of repo creation options against options that are known.
2460 """Filters a dict of repo creation options against options that are known.
2449
2461
2450 Receives a dict of repo creation options and returns a dict of those
2462 Receives a dict of repo creation options and returns a dict of those
2451 options that we don't know how to handle.
2463 options that we don't know how to handle.
2452
2464
2453 This function is called as part of repository creation. If the
2465 This function is called as part of repository creation. If the
2454 returned dict contains any items, repository creation will not
2466 returned dict contains any items, repository creation will not
2455 be allowed, as it means there was a request to create a repository
2467 be allowed, as it means there was a request to create a repository
2456 with options not recognized by loaded code.
2468 with options not recognized by loaded code.
2457
2469
2458 Extensions can wrap this function to filter out creation options
2470 Extensions can wrap this function to filter out creation options
2459 they know how to handle.
2471 they know how to handle.
2460 """
2472 """
2461 known = {'narrowfiles'}
2473 known = {'narrowfiles'}
2462
2474
2463 return {k: v for k, v in createopts.items() if k not in known}
2475 return {k: v for k, v in createopts.items() if k not in known}
2464
2476
2465 def createrepository(ui, path, createopts=None):
2477 def createrepository(ui, path, createopts=None):
2466 """Create a new repository in a vfs.
2478 """Create a new repository in a vfs.
2467
2479
2468 ``path`` path to the new repo's working directory.
2480 ``path`` path to the new repo's working directory.
2469 ``createopts`` options for the new repository.
2481 ``createopts`` options for the new repository.
2470 """
2482 """
2471 createopts = createopts or {}
2483 createopts = createopts or {}
2472
2484
2473 unknownopts = filterknowncreateopts(ui, createopts)
2485 unknownopts = filterknowncreateopts(ui, createopts)
2474
2486
2475 if not isinstance(unknownopts, dict):
2487 if not isinstance(unknownopts, dict):
2476 raise error.ProgrammingError('filterknowncreateopts() did not return '
2488 raise error.ProgrammingError('filterknowncreateopts() did not return '
2477 'a dict')
2489 'a dict')
2478
2490
2479 if unknownopts:
2491 if unknownopts:
2480 raise error.Abort(_('unable to create repository because of unknown '
2492 raise error.Abort(_('unable to create repository because of unknown '
2481 'creation option: %s') %
2493 'creation option: %s') %
2482 ', '.join(sorted(unknownopts)),
2494 ', '.join(sorted(unknownopts)),
2483 hint=_('is a required extension not loaded?'))
2495 hint=_('is a required extension not loaded?'))
2484
2496
2485 requirements = newreporequirements(ui, createopts=createopts)
2497 requirements = newreporequirements(ui, createopts=createopts)
2486
2498
2487 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2499 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2488 if not wdirvfs.exists():
2500 if not wdirvfs.exists():
2489 wdirvfs.makedirs()
2501 wdirvfs.makedirs()
2490
2502
2491 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2503 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2492 if hgvfs.exists():
2504 if hgvfs.exists():
2493 raise error.RepoError(_('repository %s already exists') % path)
2505 raise error.RepoError(_('repository %s already exists') % path)
2494
2506
2495 hgvfs.makedir(notindexed=True)
2507 hgvfs.makedir(notindexed=True)
2496
2508
2497 if b'store' in requirements:
2509 if b'store' in requirements:
2498 hgvfs.mkdir(b'store')
2510 hgvfs.mkdir(b'store')
2499
2511
2500 # We create an invalid changelog outside the store so very old
2512 # We create an invalid changelog outside the store so very old
2501 # Mercurial versions (which didn't know about the requirements
2513 # Mercurial versions (which didn't know about the requirements
2502 # file) encounter an error on reading the changelog. This
2514 # file) encounter an error on reading the changelog. This
2503 # effectively locks out old clients and prevents them from
2515 # effectively locks out old clients and prevents them from
2504 # mucking with a repo in an unknown format.
2516 # mucking with a repo in an unknown format.
2505 #
2517 #
2506 # The revlog header has version 2, which won't be recognized by
2518 # The revlog header has version 2, which won't be recognized by
2507 # such old clients.
2519 # such old clients.
2508 hgvfs.append(b'00changelog.i',
2520 hgvfs.append(b'00changelog.i',
2509 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2521 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2510 b'layout')
2522 b'layout')
2511
2523
2512 scmutil.writerequires(hgvfs, requirements)
2524 scmutil.writerequires(hgvfs, requirements)
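A minimal usage sketch of createrepository() as defined above, assuming a ui object; the target path is illustrative. The call creates the working directory if needed, the '.hg' directory, an optional 'store' directory, the dummy '00changelog.i', and the 'requires' file:

    from mercurial import localrepo, ui as uimod

    myui = uimod.ui.load()
    localrepo.createrepository(myui, b'/tmp/newrepo',
                               createopts={'narrowfiles': False})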
2513
2525
2514 def poisonrepository(repo):
2526 def poisonrepository(repo):
2515 """Poison a repository instance so it can no longer be used."""
2527 """Poison a repository instance so it can no longer be used."""
2516 # Perform any cleanup on the instance.
2528 # Perform any cleanup on the instance.
2517 repo.close()
2529 repo.close()
2518
2530
2519 # Our strategy is to replace the type of the object with one that
2531 # Our strategy is to replace the type of the object with one that
2520 # has all attribute lookups result in error.
2532 # has all attribute lookups result in error.
2521 #
2533 #
2522 # But we have to allow the close() method because some constructors
2534 # But we have to allow the close() method because some constructors
2523 # of repos call close() on repo references.
2535 # of repos call close() on repo references.
2524 class poisonedrepository(object):
2536 class poisonedrepository(object):
2525 def __getattribute__(self, item):
2537 def __getattribute__(self, item):
2526 if item == r'close':
2538 if item == r'close':
2527 return object.__getattribute__(self, item)
2539 return object.__getattribute__(self, item)
2528
2540
2529 raise error.ProgrammingError('repo instances should not be used '
2541 raise error.ProgrammingError('repo instances should not be used '
2530 'after unshare')
2542 'after unshare')
2531
2543
2532 def close(self):
2544 def close(self):
2533 pass
2545 pass
2534
2546
2535 # We may have a repoview, which intercepts __setattr__. So be sure
2547 # We may have a repoview, which intercepts __setattr__. So be sure
2536 # we operate at the lowest level possible.
2548 # we operate at the lowest level possible.
2537 object.__setattr__(repo, r'__class__', poisonedrepository)
2549 object.__setattr__(repo, r'__class__', poisonedrepository)
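An illustrative sketch of the behavior implemented above: once poisoned, the instance still accepts close(), but any other attribute access fails loudly (repo stands for a previously opened repository object):

    localrepo.poisonrepository(repo)

    repo.close()      # still permitted; poisonedrepository keeps close()
    repo.changelog    # raises error.ProgrammingError('repo instances should
                      # not be used after unshare')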
@@ -1,230 +1,230 b''
1 # Test that certain objects conform to well-defined interfaces.
1 # Test that certain objects conform to well-defined interfaces.
2
2
3 from __future__ import absolute_import, print_function
3 from __future__ import absolute_import, print_function
4
4
5 from mercurial import encoding
5 from mercurial import encoding
6 encoding.environ[b'HGREALINTERFACES'] = b'1'
6 encoding.environ[b'HGREALINTERFACES'] = b'1'
7
7
8 import os
8 import os
9 import subprocess
9 import subprocess
10 import sys
10 import sys
11
11
12 # Only run if tests are run in a repo
12 # Only run if tests are run in a repo
13 if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'],
13 if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'],
14 'test-repo']):
14 'test-repo']):
15 sys.exit(80)
15 sys.exit(80)
16
16
17 from mercurial.thirdparty.zope import (
17 from mercurial.thirdparty.zope import (
18 interface as zi,
18 interface as zi,
19 )
19 )
20 from mercurial.thirdparty.zope.interface import (
20 from mercurial.thirdparty.zope.interface import (
21 verify as ziverify,
21 verify as ziverify,
22 )
22 )
23 from mercurial import (
23 from mercurial import (
24 changegroup,
24 changegroup,
25 bundlerepo,
25 bundlerepo,
26 filelog,
26 filelog,
27 httppeer,
27 httppeer,
28 localrepo,
28 localrepo,
29 manifest,
29 manifest,
30 pycompat,
30 pycompat,
31 repository,
31 repository,
32 revlog,
32 revlog,
33 sshpeer,
33 sshpeer,
34 statichttprepo,
34 statichttprepo,
35 ui as uimod,
35 ui as uimod,
36 unionrepo,
36 unionrepo,
37 vfs as vfsmod,
37 vfs as vfsmod,
38 wireprotoserver,
38 wireprotoserver,
39 wireprototypes,
39 wireprototypes,
40 wireprotov1peer,
40 wireprotov1peer,
41 wireprotov2server,
41 wireprotov2server,
42 )
42 )
43
43
44 rootdir = pycompat.fsencode(
44 rootdir = pycompat.fsencode(
45 os.path.normpath(os.path.join(os.path.dirname(__file__), '..')))
45 os.path.normpath(os.path.join(os.path.dirname(__file__), '..')))
46
46
47 def checkzobject(o, allowextra=False):
47 def checkzobject(o, allowextra=False):
48 """Verify an object with a zope interface."""
48 """Verify an object with a zope interface."""
49 ifaces = zi.providedBy(o)
49 ifaces = zi.providedBy(o)
50 if not ifaces:
50 if not ifaces:
51 print('%r does not provide any zope interfaces' % o)
51 print('%r does not provide any zope interfaces' % o)
52 return
52 return
53
53
54 # Run zope.interface's built-in verification routine. This verifies that
54 # Run zope.interface's built-in verification routine. This verifies that
55 # everything that is supposed to be present is present.
55 # everything that is supposed to be present is present.
56 for iface in ifaces:
56 for iface in ifaces:
57 ziverify.verifyObject(iface, o)
57 ziverify.verifyObject(iface, o)
58
58
59 if allowextra:
59 if allowextra:
60 return
60 return
61
61
62 # Now verify that the object provides no extra public attributes that
62 # Now verify that the object provides no extra public attributes that
63 # aren't declared as part of interfaces.
63 # aren't declared as part of interfaces.
64 allowed = set()
64 allowed = set()
65 for iface in ifaces:
65 for iface in ifaces:
66 allowed |= set(iface.names(all=True))
66 allowed |= set(iface.names(all=True))
67
67
68 public = {a for a in dir(o) if not a.startswith('_')}
68 public = {a for a in dir(o) if not a.startswith('_')}
69
69
70 for attr in sorted(public - allowed):
70 for attr in sorted(public - allowed):
71 print('public attribute not declared in interfaces: %s.%s' % (
71 print('public attribute not declared in interfaces: %s.%s' % (
72 o.__class__.__name__, attr))
72 o.__class__.__name__, attr))
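For readers unfamiliar with zope.interface, a tiny self-contained sketch of the mechanism checkzobject() builds on; the IGreeter interface and greeter class are made up for illustration:

    from mercurial.thirdparty.zope import interface as zi
    from mercurial.thirdparty.zope.interface import verify as ziverify

    class IGreeter(zi.Interface):
        def greet(name):
            """Return a greeting for ``name``."""

    @zi.implementer(IGreeter)
    class greeter(object):
        def greet(self, name):
            return 'hello %s' % name

    # verifyObject() checks that the instance provides everything IGreeter
    # declares; this is the first half of what checkzobject() does.
    ziverify.verifyObject(IGreeter, greeter())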
73
73
74 # Facilitates testing localpeer.
74 # Facilitates testing localpeer.
75 class dummyrepo(object):
75 class dummyrepo(object):
76 def __init__(self):
76 def __init__(self):
77 self.ui = uimod.ui()
77 self.ui = uimod.ui()
78 def filtered(self, name):
78 def filtered(self, name):
79 pass
79 pass
80 def _restrictcapabilities(self, caps):
80 def _restrictcapabilities(self, caps):
81 pass
81 pass
82
82
83 class dummyopener(object):
83 class dummyopener(object):
84 handlers = []
84 handlers = []
85
85
86 # Facilitates testing sshpeer without requiring a server.
86 # Facilitates testing sshpeer without requiring a server.
87 class badpeer(httppeer.httppeer):
87 class badpeer(httppeer.httppeer):
88 def __init__(self):
88 def __init__(self):
89 super(badpeer, self).__init__(None, None, None, dummyopener(), None,
89 super(badpeer, self).__init__(None, None, None, dummyopener(), None,
90 None)
90 None)
91 self.badattribute = True
91 self.badattribute = True
92
92
93 def badmethod(self):
93 def badmethod(self):
94 pass
94 pass
95
95
96 class dummypipe(object):
96 class dummypipe(object):
97 def close(self):
97 def close(self):
98 pass
98 pass
99
99
100 def main():
100 def main():
101 ui = uimod.ui()
101 ui = uimod.ui()
102 # Needed so we can open a local repo with obsstore without a warning.
102 # Needed so we can open a local repo with obsstore without a warning.
103 ui.setconfig('experimental', 'evolution.createmarkers', True)
103 ui.setconfig('experimental', 'evolution.createmarkers', True)
104
104
105 checkzobject(badpeer())
105 checkzobject(badpeer())
106
106
107 ziverify.verifyClass(repository.ipeerbase, httppeer.httppeer)
107 ziverify.verifyClass(repository.ipeerbase, httppeer.httppeer)
108 checkzobject(httppeer.httppeer(None, None, None, dummyopener(), None, None))
108 checkzobject(httppeer.httppeer(None, None, None, dummyopener(), None, None))
109
109
110 ziverify.verifyClass(repository.ipeerconnection,
110 ziverify.verifyClass(repository.ipeerconnection,
111 httppeer.httpv2peer)
111 httppeer.httpv2peer)
112 ziverify.verifyClass(repository.ipeercapabilities,
112 ziverify.verifyClass(repository.ipeercapabilities,
113 httppeer.httpv2peer)
113 httppeer.httpv2peer)
114 checkzobject(httppeer.httpv2peer(None, b'', b'', None, None, None))
114 checkzobject(httppeer.httpv2peer(None, b'', b'', None, None, None))
115
115
116 ziverify.verifyClass(repository.ipeerbase,
116 ziverify.verifyClass(repository.ipeerbase,
117 localrepo.localpeer)
117 localrepo.localpeer)
118 checkzobject(localrepo.localpeer(dummyrepo()))
118 checkzobject(localrepo.localpeer(dummyrepo()))
119
119
120 ziverify.verifyClass(repository.ipeercommandexecutor,
120 ziverify.verifyClass(repository.ipeercommandexecutor,
121 localrepo.localcommandexecutor)
121 localrepo.localcommandexecutor)
122 checkzobject(localrepo.localcommandexecutor(None))
122 checkzobject(localrepo.localcommandexecutor(None))
123
123
124 ziverify.verifyClass(repository.ipeercommandexecutor,
124 ziverify.verifyClass(repository.ipeercommandexecutor,
125 wireprotov1peer.peerexecutor)
125 wireprotov1peer.peerexecutor)
126 checkzobject(wireprotov1peer.peerexecutor(None))
126 checkzobject(wireprotov1peer.peerexecutor(None))
127
127
128 ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv1peer)
128 ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv1peer)
129 checkzobject(sshpeer.sshv1peer(ui, b'ssh://localhost/foo', b'', dummypipe(),
129 checkzobject(sshpeer.sshv1peer(ui, b'ssh://localhost/foo', b'', dummypipe(),
130 dummypipe(), None, None))
130 dummypipe(), None, None))
131
131
132 ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv2peer)
132 ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv2peer)
133 checkzobject(sshpeer.sshv2peer(ui, b'ssh://localhost/foo', b'', dummypipe(),
133 checkzobject(sshpeer.sshv2peer(ui, b'ssh://localhost/foo', b'', dummypipe(),
134 dummypipe(), None, None))
134 dummypipe(), None, None))
135
135
136 ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer)
136 ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer)
137 checkzobject(bundlerepo.bundlepeer(dummyrepo()))
137 checkzobject(bundlerepo.bundlepeer(dummyrepo()))
138
138
139 ziverify.verifyClass(repository.ipeerbase, statichttprepo.statichttppeer)
139 ziverify.verifyClass(repository.ipeerbase, statichttprepo.statichttppeer)
140 checkzobject(statichttprepo.statichttppeer(dummyrepo()))
140 checkzobject(statichttprepo.statichttppeer(dummyrepo()))
141
141
142 ziverify.verifyClass(repository.ipeerbase, unionrepo.unionpeer)
142 ziverify.verifyClass(repository.ipeerbase, unionrepo.unionpeer)
143 checkzobject(unionrepo.unionpeer(dummyrepo()))
143 checkzobject(unionrepo.unionpeer(dummyrepo()))
144
144
145 ziverify.verifyClass(repository.completelocalrepository,
145 ziverify.verifyClass(repository.completelocalrepository,
146 localrepo.localrepository)
146 localrepo.localrepository)
147 repo = localrepo.localrepository(ui, rootdir)
147 repo = localrepo.makelocalrepository(ui, rootdir)
148 checkzobject(repo)
148 checkzobject(repo)
149
149
150 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
150 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
151 wireprotoserver.sshv1protocolhandler)
151 wireprotoserver.sshv1protocolhandler)
152 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
152 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
153 wireprotoserver.sshv2protocolhandler)
153 wireprotoserver.sshv2protocolhandler)
154 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
154 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
155 wireprotoserver.httpv1protocolhandler)
155 wireprotoserver.httpv1protocolhandler)
156 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
156 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
157 wireprotov2server.httpv2protocolhandler)
157 wireprotov2server.httpv2protocolhandler)
158
158
159 sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None)
159 sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None)
160 checkzobject(sshv1)
160 checkzobject(sshv1)
161 sshv2 = wireprotoserver.sshv2protocolhandler(None, None, None)
161 sshv2 = wireprotoserver.sshv2protocolhandler(None, None, None)
162 checkzobject(sshv2)
162 checkzobject(sshv2)
163
163
164 httpv1 = wireprotoserver.httpv1protocolhandler(None, None, None)
164 httpv1 = wireprotoserver.httpv1protocolhandler(None, None, None)
165 checkzobject(httpv1)
165 checkzobject(httpv1)
166 httpv2 = wireprotov2server.httpv2protocolhandler(None, None)
166 httpv2 = wireprotov2server.httpv2protocolhandler(None, None)
167 checkzobject(httpv2)
167 checkzobject(httpv2)
168
168
169 ziverify.verifyClass(repository.ifilestorage, filelog.filelog)
169 ziverify.verifyClass(repository.ifilestorage, filelog.filelog)
170 ziverify.verifyClass(repository.imanifestdict, manifest.manifestdict)
170 ziverify.verifyClass(repository.imanifestdict, manifest.manifestdict)
171 ziverify.verifyClass(repository.imanifestrevisionstored,
171 ziverify.verifyClass(repository.imanifestrevisionstored,
172 manifest.manifestctx)
172 manifest.manifestctx)
173 ziverify.verifyClass(repository.imanifestrevisionwritable,
173 ziverify.verifyClass(repository.imanifestrevisionwritable,
174 manifest.memmanifestctx)
174 manifest.memmanifestctx)
175 ziverify.verifyClass(repository.imanifestrevisionstored,
175 ziverify.verifyClass(repository.imanifestrevisionstored,
176 manifest.treemanifestctx)
176 manifest.treemanifestctx)
177 ziverify.verifyClass(repository.imanifestrevisionwritable,
177 ziverify.verifyClass(repository.imanifestrevisionwritable,
178 manifest.memtreemanifestctx)
178 manifest.memtreemanifestctx)
179 ziverify.verifyClass(repository.imanifestlog, manifest.manifestlog)
179 ziverify.verifyClass(repository.imanifestlog, manifest.manifestlog)
180 ziverify.verifyClass(repository.imanifeststorage, manifest.manifestrevlog)
180 ziverify.verifyClass(repository.imanifeststorage, manifest.manifestrevlog)
181
181
182 vfs = vfsmod.vfs(b'.')
182 vfs = vfsmod.vfs(b'.')
183 fl = filelog.filelog(vfs, b'dummy.i')
183 fl = filelog.filelog(vfs, b'dummy.i')
184 checkzobject(fl, allowextra=True)
184 checkzobject(fl, allowextra=True)
185
185
186 # Conforms to imanifestlog.
186 # Conforms to imanifestlog.
187 ml = manifest.manifestlog(vfs, repo)
187 ml = manifest.manifestlog(vfs, repo)
188 checkzobject(ml)
188 checkzobject(ml)
189 checkzobject(repo.manifestlog)
189 checkzobject(repo.manifestlog)
190
190
191 # Conforms to imanifestrevision.
191 # Conforms to imanifestrevision.
192 mctx = ml[repo[0].manifestnode()]
192 mctx = ml[repo[0].manifestnode()]
193 checkzobject(mctx)
193 checkzobject(mctx)
194
194
195 # Conforms to imanifestrevisionwritable.
195 # Conforms to imanifestrevisionwritable.
196 checkzobject(mctx.new())
196 checkzobject(mctx.new())
197 checkzobject(mctx.copy())
197 checkzobject(mctx.copy())
198
198
199 # Conforms to imanifestdict.
199 # Conforms to imanifestdict.
200 checkzobject(mctx.read())
200 checkzobject(mctx.read())
201
201
202 mrl = manifest.manifestrevlog(vfs)
202 mrl = manifest.manifestrevlog(vfs)
203 checkzobject(mrl)
203 checkzobject(mrl)
204
204
205 ziverify.verifyClass(repository.irevisiondelta,
205 ziverify.verifyClass(repository.irevisiondelta,
206 revlog.revlogrevisiondelta)
206 revlog.revlogrevisiondelta)
207 ziverify.verifyClass(repository.irevisiondeltarequest,
207 ziverify.verifyClass(repository.irevisiondeltarequest,
208 changegroup.revisiondeltarequest)
208 changegroup.revisiondeltarequest)
209
209
210 rd = revlog.revlogrevisiondelta(
210 rd = revlog.revlogrevisiondelta(
211 node=b'',
211 node=b'',
212 p1node=b'',
212 p1node=b'',
213 p2node=b'',
213 p2node=b'',
214 basenode=b'',
214 basenode=b'',
215 linknode=b'',
215 linknode=b'',
216 flags=b'',
216 flags=b'',
217 baserevisionsize=None,
217 baserevisionsize=None,
218 revision=b'',
218 revision=b'',
219 delta=None)
219 delta=None)
220 checkzobject(rd)
220 checkzobject(rd)
221
221
222 rdr = changegroup.revisiondeltarequest(
222 rdr = changegroup.revisiondeltarequest(
223 node=b'',
223 node=b'',
224 linknode=b'',
224 linknode=b'',
225 p1node=b'',
225 p1node=b'',
226 p2node=b'',
226 p2node=b'',
227 basenode=b'')
227 basenode=b'')
228 checkzobject(rdr)
228 checkzobject(rdr)
229
229
230 main()
230 main()