localrepo: move requirements reasonability testing to own function...
Gregory Szorc
r39731:cb2dcfa5 default
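This changeset splits requirement validation during repository opening into two steps: ensurerequirementsrecognized() rejects requirements the loaded code does not know at all, and the new ensurerequirementscompatible() rejects known requirements that cannot be used together or that need config which is not enabled (the exp-sparse check moves there out of localrepository.__init__). The new function's docstring invites extensions to monkeypatch it for additional checking; a minimal, hypothetical sketch of that pattern follows (the 'exp-myext' requirement and the 'myext' config knob are illustrative, not part of this patch):

# Hypothetical extension code; only the monkeypatching pattern is suggested
# by the docstring of ensurerequirementscompatible() added in the diff below.
from mercurial import error, localrepo

_origcompatible = localrepo.ensurerequirementscompatible

def _compatiblewithmyext(ui, requirements):
    # Run the core checks first (e.g. the exp-sparse check moved here).
    _origcompatible(ui, requirements)
    # Then refuse to open repos carrying an illustrative custom requirement
    # unless a (made-up) config knob is set.
    if b'exp-myext' in requirements and not ui.configbool(b'myext', b'enabled'):
        raise error.RepoError(b'repository requires the myext feature')

localrepo.ensurerequirementscompatible = _compatiblewithmyext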
diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2647 +1,2665 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import hashlib
 import os
 import random
 import sys
 import time
 import weakref
 
 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     changelog,
     color,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     manifest,
     match as matchmod,
     merge as mergemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     repository,
     repoview,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
 )
 from .utils import (
     interfaceutil,
     procutil,
     stringutil,
 )
 
 from .revlogutils import (
     constants as revlogconst,
 )
 
 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq
 
 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()
 
 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """
     def __get__(self, repo, type=None):
         if repo is None:
             return self
         return super(_basefilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(_basefilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(_basefilecache, self).__delete__(repo.unfiltered())
 
 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""
     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, 'plain'))
 
     def join(self, obj, fname):
         return obj.vfs.join(fname)
 
 class storecache(_basefilecache):
     """filecache for files in the store"""
     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, ''))
 
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property
 
     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True
 
 class unfilteredpropertycache(util.propertycache):
     """propertycache that apply to unfiltered repo only"""
 
     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)
 
 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering in account"""
 
     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)
 
 
 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())
 
 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper
 
 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
               'unbundle'}
 legacycaps = moderncaps.union({'changegroupsubset'})
 
 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor(object):
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False
 
     def __enter__(self):
         return self
 
     def __exit__(self, exctype, excvalue, exctb):
         self.close()
 
     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError('callcommand() cannot be used after '
                                          'sendcommands()')
 
         if self._closed:
             raise error.ProgrammingError('callcommand() cannot be used after '
                                          'close()')
 
         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))
 
         f = pycompat.futures.Future()
 
         try:
             result = fn(**pycompat.strkwargs(args))
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
         else:
             f.set_result(result)
 
         return f
 
     def sendcommands(self):
         self._sent = True
 
     def close(self):
         self._closed = True
 
 @interfaceutil.implementer(repository.ipeercommands)
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''
 
     def __init__(self, repo, caps=None):
         super(localpeer, self).__init__()
 
         if caps is None:
             caps = moderncaps.copy()
         self._repo = repo.filtered('served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
 
     # Begin of _basepeer interface.
 
     def url(self):
         return self._repo.url()
 
     def local(self):
         return self._repo
 
     def peer(self):
         return self
 
     def canpush(self):
         return True
 
     def close(self):
         self._repo.close()
 
     # End of _basepeer interface.
 
     # Begin of _basewirecommands interface.
 
     def branchmap(self):
         return self._repo.branchmap()
 
     def capabilities(self):
         return self._caps
 
     def clonebundles(self):
         return self._repo.tryread('clonebundles.manifest')
 
     def debugwireargs(self, one, two, three=None, four=None, five=None):
         """Used to test argument passing over the wire"""
         return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                    pycompat.bytestr(four),
                                    pycompat.bytestr(five))
 
     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                   **kwargs):
         chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                           common=common, bundlecaps=bundlecaps,
                                           **kwargs)[1]
         cb = util.chunkbuffer(chunks)
 
         if exchange.bundle2requested(bundlecaps):
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler('01', cb, None)
 
     def heads(self):
         return self._repo.heads()
 
     def known(self, nodes):
         return self._repo.known(nodes)
 
     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)
 
     def lookup(self, key):
         return self._repo.lookup(key)
 
     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)
 
     def stream_out(self):
         raise error.Abort(_('cannot perform stream clone against local '
                             'peer'))
 
     def unbundle(self, bundle, heads, url):
         """apply a bundle on a repo
 
         This function handles the repo locking itself."""
         try:
             try:
                 bundle = exchange.readbundle(self.ui, bundle, None)
                 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                 if util.safehasattr(ret, 'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(_('push failed:'),
                                       stringutil.forcebytestr(exc))
 
     # End of _basewirecommands interface.
 
     # Begin of peer interface.
 
     def commandexecutor(self):
         return localcommandexecutor(self)
 
     # End of peer interface.
 
 @interfaceutil.implementer(repository.ipeerlegacycommands)
 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''
 
     def __init__(self, repo):
         super(locallegacypeer, self).__init__(repo, caps=legacycaps)
 
     # Begin of baselegacywirecommands interface.
 
     def between(self, pairs):
         return self._repo.between(pairs)
 
     def branches(self, nodes):
         return self._repo.branches(nodes)
 
     def changegroup(self, nodes, source):
         outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                       missingheads=self._repo.heads())
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)
 
     def changegroupsubset(self, bases, heads, source):
         outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                       missingheads=heads)
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)
 
     # End of baselegacywirecommands interface.
 
 # Increment the sub-version when the revlog v2 format changes to lock out old
 # clients.
 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
 
 # A repository with the sparserevlog feature will have delta chains that
 # can spread over a larger span. Sparse reading cuts these large spans into
 # pieces, so that each piece isn't too big.
 # Without the sparserevlog capability, reading from the repository could use
 # huge amounts of memory, because the whole span would be read at once,
 # including all the intermediate revisions that aren't pertinent for the chain.
 # This is why once a repository has enabled sparse-read, it becomes required.
 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
 
 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
 # functions defined in loaded extensions are called.
 #
 # The function receives a set of requirement strings that the repository
 # is capable of opening. Functions will typically add elements to the
 # set to reflect that the extension knows how to handle that requirements.
 featuresetupfuncs = set()
 
 def makelocalrepository(baseui, path, intents=None):
     """Create a local repository object.
 
     Given arguments needed to construct a local repository, this function
     derives a type suitable for representing that repository and returns an
     instance of it.
 
     The returned object conforms to the ``repository.completelocalrepository``
     interface.
     """
     ui = baseui.copy()
     # Prevent copying repo configuration.
     ui.copy = baseui.copy
 
     # Working directory VFS rooted at repository root.
     wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
 
     # Main VFS for .hg/ directory.
     hgpath = wdirvfs.join(b'.hg')
     hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
 
     # The .hg/ path should exist and should be a directory. All other
     # cases are errors.
     if not hgvfs.isdir():
         try:
             hgvfs.stat()
         except OSError as e:
             if e.errno != errno.ENOENT:
                 raise
 
         raise error.RepoError(_(b'repository %s not found') % path)
 
     # .hg/requires file contains a newline-delimited list of
     # features/capabilities the opener (us) must have in order to use
     # the repository. This file was introduced in Mercurial 0.9.2,
     # which means very old repositories may not have one. We assume
     # a missing file translates to no requirements.
     try:
         requirements = set(hgvfs.read(b'requires').splitlines())
     except IOError as e:
         if e.errno != errno.ENOENT:
             raise
         requirements = set()
 
     # The .hg/hgrc file may load extensions or contain config options
     # that influence repository construction. Attempt to load it and
     # process any new extensions that it may have pulled in.
     try:
         ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
     except IOError:
         pass
     else:
         extensions.loadall(ui)
 
     supportedrequirements = gathersupportedrequirements(ui)
+
+    # We first validate the requirements are known.
     ensurerequirementsrecognized(requirements, supportedrequirements)
 
+    # Then we validate that the known set is reasonable to use together.
+    ensurerequirementscompatible(ui, requirements)
+
     # At this point, we know we should be capable of opening the repository.
     # Now get on with doing that.
 
     return localrepository(
         baseui=baseui,
         ui=ui,
         origroot=path,
         wdirvfs=wdirvfs,
         hgvfs=hgvfs,
         requirements=requirements,
         supportedrequirements=supportedrequirements,
         intents=intents)
 
 def gathersupportedrequirements(ui):
     """Determine the complete set of recognized requirements."""
     # Start with all requirements supported by this file.
     supported = set(localrepository._basesupported)
 
     # Execute ``featuresetupfuncs`` entries if they belong to an extension
     # relevant to this ui instance.
     modules = {m.__name__ for n, m in extensions.extensions(ui)}
 
     for fn in featuresetupfuncs:
         if fn.__module__ in modules:
             fn(ui, supported)
 
     # Add derived requirements from registered compression engines.
     for name in util.compengines:
         engine = util.compengines[name]
         if engine.revlogheader():
             supported.add(b'exp-compression-%s' % name)
 
     return supported
 
 def ensurerequirementsrecognized(requirements, supported):
     """Validate that a set of local requirements is recognized.
 
     Receives a set of requirements. Raises an ``error.RepoError`` if there
     exists any requirement in that set that currently loaded code doesn't
     recognize.
 
     Returns a set of supported requirements.
     """
     missing = set()
 
     for requirement in requirements:
         if requirement in supported:
             continue
 
         if not requirement or not requirement[0:1].isalnum():
             raise error.RequirementError(_(b'.hg/requires file is corrupt'))
 
         missing.add(requirement)
 
     if missing:
         raise error.RequirementError(
             _(b'repository requires features unknown to this Mercurial: %s') %
             b' '.join(sorted(missing)),
             hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                    b'for more information'))
 
+def ensurerequirementscompatible(ui, requirements):
+    """Validates that a set of recognized requirements is mutually compatible.
+
+    Some requirements may not be compatible with others or require
+    config options that aren't enabled. This function is called during
+    repository opening to ensure that the set of requirements needed
+    to open a repository is sane and compatible with config options.
+
+    Extensions can monkeypatch this function to perform additional
+    checking.
+
+    ``error.RepoError`` should be raised on failure.
+    """
+    if b'exp-sparse' in requirements and not sparse.enabled:
+        raise error.RepoError(_(b'repository is using sparse feature but '
+                                b'sparse is not enabled; enable the '
+                                b'"sparse" extensions to access'))
+
 @interfaceutil.implementer(repository.completelocalrepository)
 class localrepository(object):
 
     # obsolete experimental requirements:
     # - manifestv2: An experimental new manifest format that allowed
     #   for stem compression of long paths. Experiment ended up not
     #   being successful (repository sizes went up due to worse delta
     #   chains), and the code was deleted in 4.6.
     supportedformats = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
         REVLOGV2_REQUIREMENT,
         SPARSEREVLOG_REQUIREMENT,
     }
     _basesupported = supportedformats | {
         'store',
         'fncache',
         'shared',
         'relshared',
         'dotencode',
         'exp-sparse',
         'internal-phase'
     }
     openerreqs = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
     }
 
     # list of prefix for file which can be written without 'wlock'
     # Extensions should extend this list when needed
     _wlockfreeprefix = {
         # We migh consider requiring 'wlock' for the next
         # two, but pretty much all the existing code assume
         # wlock is not needed so we keep them excluded for
         # now.
         'hgrc',
         'requires',
         # XXX cache is a complicatged business someone
         # should investigate this in depth at some point
         'cache/',
         # XXX shouldn't be dirstate covered by the wlock?
         'dirstate',
         # XXX bisect was still a bit too messy at the time
         # this changeset was introduced. Someone should fix
         # the remainig bit and drop this line
         'bisect.state',
     }
 
     def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                  supportedrequirements, intents=None):
         """Create a new local repository instance.
 
         Most callers should use ``hg.repository()``, ``localrepo.instance()``,
         or ``localrepo.makelocalrepository()`` for obtaining a new repository
         object.
 
         Arguments:
 
         baseui
            ``ui.ui`` instance that ``ui`` argument was based off of.
 
         ui
            ``ui.ui`` instance for use by the repository.
 
         origroot
            ``bytes`` path to working directory root of this repository.
 
         wdirvfs
            ``vfs.vfs`` rooted at the working directory.
 
         hgvfs
            ``vfs.vfs`` rooted at .hg/
 
         requirements
            ``set`` of bytestrings representing repository opening requirements.
 
         supportedrequirements
            ``set`` of bytestrings representing repository requirements that we
            know how to open. May be a supetset of ``requirements``.
 
         intents
            ``set`` of system strings indicating what this repo will be used
            for.
         """
         self.baseui = baseui
         self.ui = ui
         self.origroot = origroot
         # vfs rooted at working directory.
         self.wvfs = wdirvfs
         self.root = wdirvfs.base
         # vfs rooted at .hg/. Used to access most non-store paths.
         self.vfs = hgvfs
         self.path = hgvfs.base
         self.requirements = requirements
         self.supported = supportedrequirements
 
         self.filtername = None
         # svfs: usually rooted at .hg/store, used to access repository history
         # If this is a shared repository, this vfs may point to another
         # repository's .hg/store directory.
         self.svfs = None
 
         if (self.ui.configbool('devel', 'all-warnings') or
             self.ui.configbool('devel', 'check-locks')):
             self.vfs.audit = self._getvfsward(self.vfs.audit)
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
 
         color.setup(self.ui)
 
         cachepath = self.vfs.join('cache')
         self.sharedpath = self.path
         try:
             sharedpath = self.vfs.read("sharedpath").rstrip('\n')
             if 'relshared' in self.requirements:
                 sharedpath = self.vfs.join(sharedpath)
             vfs = vfsmod.vfs(sharedpath, realpath=True)
             cachepath = vfs.join('cache')
             s = vfs.base
             if not vfs.exists():
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
 
-        if 'exp-sparse' in self.requirements and not sparse.enabled:
-            raise error.RepoError(_('repository is using sparse feature but '
-                                    'sparse is not enabled; enable the '
-                                    '"sparse" extensions to access'))
-
633 self.store = store.store(
651 self.store = store.store(
634 self.requirements, self.sharedpath,
652 self.requirements, self.sharedpath,
635 lambda base: vfsmod.vfs(base, cacheaudited=True))
653 lambda base: vfsmod.vfs(base, cacheaudited=True))
636 self.spath = self.store.path
654 self.spath = self.store.path
637 self.svfs = self.store.vfs
655 self.svfs = self.store.vfs
638 self.sjoin = self.store.join
656 self.sjoin = self.store.join
639 self.vfs.createmode = self.store.createmode
657 self.vfs.createmode = self.store.createmode
640 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
658 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
641 self.cachevfs.createmode = self.store.createmode
659 self.cachevfs.createmode = self.store.createmode
642 if (self.ui.configbool('devel', 'all-warnings') or
660 if (self.ui.configbool('devel', 'all-warnings') or
643 self.ui.configbool('devel', 'check-locks')):
661 self.ui.configbool('devel', 'check-locks')):
644 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
662 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
645 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
663 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
646 else: # standard vfs
664 else: # standard vfs
647 self.svfs.audit = self._getsvfsward(self.svfs.audit)
665 self.svfs.audit = self._getsvfsward(self.svfs.audit)
648 self._applyopenerreqs()
666 self._applyopenerreqs()
649
667
650 self._dirstatevalidatewarned = False
668 self._dirstatevalidatewarned = False
651
669
652 self._branchcaches = {}
670 self._branchcaches = {}
653 self._revbranchcache = None
671 self._revbranchcache = None
654 self._filterpats = {}
672 self._filterpats = {}
655 self._datafilters = {}
673 self._datafilters = {}
656 self._transref = self._lockref = self._wlockref = None
674 self._transref = self._lockref = self._wlockref = None
657
675
658 # A cache for various files under .hg/ that tracks file changes,
676 # A cache for various files under .hg/ that tracks file changes,
659 # (used by the filecache decorator)
677 # (used by the filecache decorator)
660 #
678 #
661 # Maps a property name to its util.filecacheentry
679 # Maps a property name to its util.filecacheentry
662 self._filecache = {}
680 self._filecache = {}
663
681
664 # hold sets of revision to be filtered
682 # hold sets of revision to be filtered
665 # should be cleared when something might have changed the filter value:
683 # should be cleared when something might have changed the filter value:
666 # - new changesets,
684 # - new changesets,
667 # - phase change,
685 # - phase change,
668 # - new obsolescence marker,
686 # - new obsolescence marker,
669 # - working directory parent change,
687 # - working directory parent change,
670 # - bookmark changes
688 # - bookmark changes
671 self.filteredrevcache = {}
689 self.filteredrevcache = {}
672
690
673 # post-dirstate-status hooks
691 # post-dirstate-status hooks
674 self._postdsstatus = []
692 self._postdsstatus = []
675
693
676 # generic mapping between names and nodes
694 # generic mapping between names and nodes
677 self.names = namespaces.namespaces()
695 self.names = namespaces.namespaces()
678
696
679 # Key to signature value.
697 # Key to signature value.
680 self._sparsesignaturecache = {}
698 self._sparsesignaturecache = {}
681 # Signature to cached matcher instance.
699 # Signature to cached matcher instance.
682 self._sparsematchercache = {}
700 self._sparsematchercache = {}
683
701
684 def _getvfsward(self, origfunc):
702 def _getvfsward(self, origfunc):
685 """build a ward for self.vfs"""
703 """build a ward for self.vfs"""
686 rref = weakref.ref(self)
704 rref = weakref.ref(self)
687 def checkvfs(path, mode=None):
705 def checkvfs(path, mode=None):
688 ret = origfunc(path, mode=mode)
706 ret = origfunc(path, mode=mode)
689 repo = rref()
707 repo = rref()
690 if (repo is None
708 if (repo is None
691 or not util.safehasattr(repo, '_wlockref')
709 or not util.safehasattr(repo, '_wlockref')
692 or not util.safehasattr(repo, '_lockref')):
710 or not util.safehasattr(repo, '_lockref')):
693 return
711 return
694 if mode in (None, 'r', 'rb'):
712 if mode in (None, 'r', 'rb'):
695 return
713 return
696 if path.startswith(repo.path):
714 if path.startswith(repo.path):
697 # truncate name relative to the repository (.hg)
715 # truncate name relative to the repository (.hg)
698 path = path[len(repo.path) + 1:]
716 path = path[len(repo.path) + 1:]
699 if path.startswith('cache/'):
717 if path.startswith('cache/'):
700 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
718 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
701 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
719 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
702 if path.startswith('journal.'):
720 if path.startswith('journal.'):
703 # journal is covered by 'lock'
721 # journal is covered by 'lock'
704 if repo._currentlock(repo._lockref) is None:
722 if repo._currentlock(repo._lockref) is None:
705 repo.ui.develwarn('write with no lock: "%s"' % path,
723 repo.ui.develwarn('write with no lock: "%s"' % path,
706 stacklevel=2, config='check-locks')
724 stacklevel=2, config='check-locks')
707 elif repo._currentlock(repo._wlockref) is None:
725 elif repo._currentlock(repo._wlockref) is None:
708 # rest of vfs files are covered by 'wlock'
726 # rest of vfs files are covered by 'wlock'
709 #
727 #
710 # exclude special files
728 # exclude special files
711 for prefix in self._wlockfreeprefix:
729 for prefix in self._wlockfreeprefix:
712 if path.startswith(prefix):
730 if path.startswith(prefix):
713 return
731 return
714 repo.ui.develwarn('write with no wlock: "%s"' % path,
732 repo.ui.develwarn('write with no wlock: "%s"' % path,
715 stacklevel=2, config='check-locks')
733 stacklevel=2, config='check-locks')
716 return ret
734 return ret
717 return checkvfs
735 return checkvfs
718
736
719 def _getsvfsward(self, origfunc):
737 def _getsvfsward(self, origfunc):
720 """build a ward for self.svfs"""
738 """build a ward for self.svfs"""
721 rref = weakref.ref(self)
739 rref = weakref.ref(self)
722 def checksvfs(path, mode=None):
740 def checksvfs(path, mode=None):
723 ret = origfunc(path, mode=mode)
741 ret = origfunc(path, mode=mode)
724 repo = rref()
742 repo = rref()
725 if repo is None or not util.safehasattr(repo, '_lockref'):
743 if repo is None or not util.safehasattr(repo, '_lockref'):
726 return
744 return
727 if mode in (None, 'r', 'rb'):
745 if mode in (None, 'r', 'rb'):
728 return
746 return
729 if path.startswith(repo.sharedpath):
747 if path.startswith(repo.sharedpath):
730 # truncate name relative to the repository (.hg)
748 # truncate name relative to the repository (.hg)
731 path = path[len(repo.sharedpath) + 1:]
749 path = path[len(repo.sharedpath) + 1:]
732 if repo._currentlock(repo._lockref) is None:
750 if repo._currentlock(repo._lockref) is None:
733 repo.ui.develwarn('write with no lock: "%s"' % path,
751 repo.ui.develwarn('write with no lock: "%s"' % path,
734 stacklevel=3)
752 stacklevel=3)
735 return ret
753 return ret
736 return checksvfs
754 return checksvfs
737
755
738 def close(self):
756 def close(self):
739 self._writecaches()
757 self._writecaches()
740
758
741 def _writecaches(self):
759 def _writecaches(self):
742 if self._revbranchcache:
760 if self._revbranchcache:
743 self._revbranchcache.write()
761 self._revbranchcache.write()
744
762
745 def _restrictcapabilities(self, caps):
763 def _restrictcapabilities(self, caps):
746 if self.ui.configbool('experimental', 'bundle2-advertise'):
764 if self.ui.configbool('experimental', 'bundle2-advertise'):
747 caps = set(caps)
765 caps = set(caps)
748 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
766 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
749 role='client'))
767 role='client'))
750 caps.add('bundle2=' + urlreq.quote(capsblob))
768 caps.add('bundle2=' + urlreq.quote(capsblob))
751 return caps
769 return caps
752
770
753 def _applyopenerreqs(self):
771 def _applyopenerreqs(self):
754 self.svfs.options = dict((r, 1) for r in self.requirements
772 self.svfs.options = dict((r, 1) for r in self.requirements
755 if r in self.openerreqs)
773 if r in self.openerreqs)
756 # experimental config: format.chunkcachesize
774 # experimental config: format.chunkcachesize
757 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
775 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
758 if chunkcachesize is not None:
776 if chunkcachesize is not None:
759 self.svfs.options['chunkcachesize'] = chunkcachesize
777 self.svfs.options['chunkcachesize'] = chunkcachesize
760 # experimental config: format.manifestcachesize
778 # experimental config: format.manifestcachesize
761 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
779 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
762 if manifestcachesize is not None:
780 if manifestcachesize is not None:
763 self.svfs.options['manifestcachesize'] = manifestcachesize
781 self.svfs.options['manifestcachesize'] = manifestcachesize
764 deltabothparents = self.ui.configbool('storage',
782 deltabothparents = self.ui.configbool('storage',
765 'revlog.optimize-delta-parent-choice')
783 'revlog.optimize-delta-parent-choice')
766 self.svfs.options['deltabothparents'] = deltabothparents
784 self.svfs.options['deltabothparents'] = deltabothparents
767 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
785 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
768 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
786 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
769 if 0 <= chainspan:
787 if 0 <= chainspan:
770 self.svfs.options['maxdeltachainspan'] = chainspan
788 self.svfs.options['maxdeltachainspan'] = chainspan
771 mmapindexthreshold = self.ui.configbytes('experimental',
789 mmapindexthreshold = self.ui.configbytes('experimental',
772 'mmapindexthreshold')
790 'mmapindexthreshold')
773 if mmapindexthreshold is not None:
791 if mmapindexthreshold is not None:
774 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
792 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
775 withsparseread = self.ui.configbool('experimental', 'sparse-read')
793 withsparseread = self.ui.configbool('experimental', 'sparse-read')
776 srdensitythres = float(self.ui.config('experimental',
794 srdensitythres = float(self.ui.config('experimental',
777 'sparse-read.density-threshold'))
795 'sparse-read.density-threshold'))
778 srmingapsize = self.ui.configbytes('experimental',
796 srmingapsize = self.ui.configbytes('experimental',
779 'sparse-read.min-gap-size')
797 'sparse-read.min-gap-size')
780 self.svfs.options['with-sparse-read'] = withsparseread
798 self.svfs.options['with-sparse-read'] = withsparseread
781 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
799 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
782 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
800 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
783 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
801 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
784 self.svfs.options['sparse-revlog'] = sparserevlog
802 self.svfs.options['sparse-revlog'] = sparserevlog
785 if sparserevlog:
803 if sparserevlog:
786 self.svfs.options['generaldelta'] = True
804 self.svfs.options['generaldelta'] = True
787 maxchainlen = None
805 maxchainlen = None
788 if sparserevlog:
806 if sparserevlog:
789 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
807 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
790 # experimental config: format.maxchainlen
808 # experimental config: format.maxchainlen
791 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
809 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
792 if maxchainlen is not None:
810 if maxchainlen is not None:
793 self.svfs.options['maxchainlen'] = maxchainlen
811 self.svfs.options['maxchainlen'] = maxchainlen
794
812
795 for r in self.requirements:
813 for r in self.requirements:
796 if r.startswith('exp-compression-'):
814 if r.startswith('exp-compression-'):
797 self.svfs.options['compengine'] = r[len('exp-compression-'):]
815 self.svfs.options['compengine'] = r[len('exp-compression-'):]
798
816
799 # TODO move "revlogv2" to openerreqs once finalized.
817 # TODO move "revlogv2" to openerreqs once finalized.
800 if REVLOGV2_REQUIREMENT in self.requirements:
818 if REVLOGV2_REQUIREMENT in self.requirements:
801 self.svfs.options['revlogv2'] = True
819 self.svfs.options['revlogv2'] = True
802
820
803 def _writerequirements(self):
821 def _writerequirements(self):
804 scmutil.writerequires(self.vfs, self.requirements)
822 scmutil.writerequires(self.vfs, self.requirements)
805
823
806 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
824 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
807 # self -> auditor -> self._checknested -> self
825 # self -> auditor -> self._checknested -> self
808
826
809 @property
827 @property
810 def auditor(self):
828 def auditor(self):
811 # This is only used by context.workingctx.match in order to
829 # This is only used by context.workingctx.match in order to
812 # detect files in subrepos.
830 # detect files in subrepos.
813 return pathutil.pathauditor(self.root, callback=self._checknested)
831 return pathutil.pathauditor(self.root, callback=self._checknested)
814
832
815 @property
833 @property
816 def nofsauditor(self):
834 def nofsauditor(self):
817 # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
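
    # Illustrative usage (editor's sketch, not part of the original source).
    # Standard view names come from repoview; for example, 'visible' hides
    # obsolete changesets and 'served' is what peers see:
    #
    #     visible = repo.filtered('visible')
    #     served = repo.filtered('served')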

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
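
    # Illustrative lookups (editor's note, not part of the original source):
    #
    #     repo[None]    # workingctx for the working directory
    #     repo[0]       # changectx for revision 0
    #     repo[0:3]     # list of changectx, skipping filtered revisions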

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        prefix is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
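
    # Example revsets (editor's sketch, not part of the original source;
    # ``%ld`` and ``%s`` are formatspec escapes for a list of ints and a
    # string respectively):
    #
    #     repo.revs('heads(%ld)', [1, 2, 3])
    #     repo.revs('branch(%s) and not public()', 'default')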

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
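
    # Illustrative usage (editor's sketch; 'alice' is a hypothetical user):
    #
    #     for ctx in repo.set('draft() and user(%s)', 'alice'):
    #         ...  # each ctx is a changectx; use ctx.hex(), ctx.description()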

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
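
    # For instance (editor's sketch; 'mine' is a hypothetical alias name):
    #
    #     repo.anyrevs(['mine()'], user=True,
    #                  localalias={'mine': 'draft() and not obsolete()'})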

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
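
    # Editor's sketch (hypothetical hook name and argument), mirroring how
    # the transaction code below invokes hooks; extra keyword arguments are
    # exposed to the hook:
    #
    #     repo.hook('myext-event', throw=False, node=hexnode)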

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
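
    # For example (editor's note; 'no-such' is a hypothetical branch name):
    #
    #     repo.branchtip('default')                      # node, or raises
    #     repo.branchtip('no-such', ignoremissing=True)  # returns None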

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
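
    # Flag semantics above (editor's note): '' writes a regular file,
    # 'l' writes a symlink, and 'x' marks a regular file executable. E.g.
    # (hypothetical file name and content):
    #
    #     repo.wwrite('run.sh', b'#!/bin/sh\n', 'x')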

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
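
    # Typical pattern (editor's sketch; 'my-operation' is a hypothetical
    # transaction description): reuse a running transaction, else open one:
    #
    #     tr = repo.currenttransaction()
    #     if tr is None:
    #         tr = repo.transaction('my-operation')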

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
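        #
        # For instance, moving tag "v1.0" might be recorded as (editor's
        # sketch; the node hashes are hypothetical and abbreviated here):
        #
        #   -M 58e2bf0b... v1.0
        #   +M 9c1a3e4d... v1.0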
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist once the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
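
    # Illustrative usage (editor's sketch; 'my-operation' is a hypothetical
    # description). Transaction objects are context managers, and a lock
    # must be held per the devel check above:
    #
    #     with repo.lock():
    #         with repo.transaction('my-operation') as tr:
    #             ...  # closes on success, aborts on exception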

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1689
1707
    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic references between the repo and the transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a later call to
            # `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

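    # A usage sketch (hypothetical caller, not part of the original file):
    # warming every cache this method knows about, including the lazily
    # populated ones, given a repo instance in hand:
    #
    #   repo.updatecaches(full=True)
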
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

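    # A minimal usage sketch (hypothetical extension code): schedule a
    # callback for when the outermost lock is released; if no lock is held,
    # the callback fires immediately, as the loop above shows:
    #
    #   def _reportunlock():
    #       repo.ui.status('all locks released\n')
    #   repo._afterlock(_reportunlock)
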
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

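    # Sketch of the documented acquisition order (hypothetical caller code,
    # assuming the returned locks are used as context managers): take 'wlock'
    # before 'lock' to avoid the dead-lock hazard described above:
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           pass  # mutate store and working copy state here
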
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit);
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

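    # A minimal usage sketch (hypothetical caller): commit everything
    # modified in the working directory with an explicit user and message;
    # the return value is the new changeset node, or None if the commit
    # turned out to be empty:
    #
    #   node = repo.commit(text='fix parser crash',
    #                      user='alice <alice@example.com>')
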
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

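    # Sketch of a post-dirstate-status callback (hypothetical extension
    # code), re-registered on each status run as the docstring requires:
    #
    #   def fixup(wctx, status):
    #       wctx.repo().ui.note('%d modified files\n' % len(status.modified))
    #   repo.addpostdsstatus(fixup)
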
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

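    # For example (hypothetical caller): the heads of the 'default' branch,
    # newest first, including heads marked as closed:
    #
    #   heads = repo.branchheads('default', closed=True)
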
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

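    # The sampling above is logarithmic: walking first parents from 'top'
    # towards 'bottom', nodes are kept at distances 1, 2, 4, 8, ... from
    # 'top', so a chain of N changesets yields only about log2(N) nodes.
    # (Orientation comment; this historically backed the 'between' wire
    # protocol command used by old-style discovery.)
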
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of hooks called with a pushop
        (carrying repo, remote, and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

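    # A usage sketch (hypothetical caller; the value encoding is
    # namespace-specific): moving a bookmark through the generic pushkey
    # interface, with the old and new values given as hex nodes:
    #
    #   ok = repo.pushkey('bookmarks', 'stable', oldhex, newhex)
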
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

2491 def undoname(fn):
2509 def undoname(fn):
2492 base, name = os.path.split(fn)
2510 base, name = os.path.split(fn)
2493 assert name.startswith('journal')
2511 assert name.startswith('journal')
2494 return os.path.join(base, name.replace('journal', 'undo', 1))
2512 return os.path.join(base, name.replace('journal', 'undo', 1))
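
# Editor's sketch (not part of this changeset): aftertrans() and undoname()
# cooperate to turn a transaction's journal files into undo files, e.g.
#
#   undoname('.hg/store/journal')             == '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots')  == '.hg/store/undo.phaseroots'
#
# os.path.split() keeps the directory part untouched, and the count
# argument of 1 replaces only the leading 'journal' in the file name.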

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def newreporequirements(ui, createopts=None):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    createopts = createopts or {}

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements
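
# Editor's sketch (not part of this changeset): the docstring above invites
# extensions to wrap this function. A minimal wrapper, assuming a
# hypothetical extension 'myext' and requirement name 'exp-myext':
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, ui, createopts=None):
#       requirements = orig(ui, createopts=createopts)
#       requirements.add('exp-myext')   # hypothetical requirement
#       return requirements
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)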

def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {'narrowfiles'}

    return {k: v for k, v in createopts.items() if k not in known}
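
# Editor's sketch (not part of this changeset): an extension that honors a
# creation option removes it from the dict before delegating, so creation
# is not rejected. 'myext-opt' is a hypothetical option name:
#
#   def _filterknowncreateopts(orig, ui, createopts):
#       createopts = dict(createopts)
#       createopts.pop('myext-opt', None)
#       return orig(ui, createopts)
#
#   extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                           _filterknowncreateopts)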

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)
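
# Editor's note (not part of this changeset): with default config the
# function above typically leaves a layout like
#
#   .hg/00changelog.i   (dummy, locks out pre-requirements clients)
#   .hg/requires        (e.g. dotencode, fncache, generaldelta,
#                        revlogv1, store -- one entry per line)
#   .hg/store/
#
# the actual contents of .hg/requires depend on ui config, as computed by
# newreporequirements().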

def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
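
# Editor's sketch (not part of this changeset): after poisoning, only
# close() remains callable; everything else raises:
#
#   poisonrepository(repo)
#   repo.close()        # still allowed, now a no-op
#   repo.changelog      # raises error.ProgrammingError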
@@ -1,224 +1,225 @@
# statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from . import (
    changelog,
    error,
    localrepo,
    manifest,
    namespaces,
    pathutil,
    store,
    url,
    util,
    vfs as vfsmod,
)

urlerr = util.urlerr
urlreq = util.urlreq

class httprangereader(object):
    def __init__(self, url, opener):
        # we assume opener has HTTPRangeHandler
        self.url = url
        self.pos = 0
        self.opener = opener
        self.name = url

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def seek(self, pos):
        self.pos = pos
    def read(self, bytes=None):
        req = urlreq.request(self.url)
        end = ''
        if bytes:
            end = self.pos + bytes - 1
        if self.pos or end:
            req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))

        try:
            f = self.opener.open(req)
            data = f.read()
            code = f.code
        except urlerr.httperror as inst:
            num = inst.code == 404 and errno.ENOENT or None
            raise IOError(num, inst)
        except urlerr.urlerror as inst:
            raise IOError(None, inst.reason[1])

        if code == 200:
            # HTTPRangeHandler does nothing if remote does not support
            # Range headers and returns the full entity. Let's slice it.
            if bytes:
                data = data[self.pos:self.pos + bytes]
            else:
                data = data[self.pos:]
        elif bytes:
            data = data[:bytes]
        self.pos += len(data)
        return data
    def readlines(self):
        return self.read().splitlines(True)
    def __iter__(self):
        return iter(self.readlines())
    def close(self):
        pass

# _RangeError and _HTTPRangeHandler were originally in byterange.py,
# which was itself extracted from urlgrabber. See the last version of
# byterange.py from history if you need more information.
class _RangeError(IOError):
    """Error raised when an unsatisfiable range is requested."""

class _HTTPRangeHandler(urlreq.basehandler):
    """Handler that enables HTTP Range headers.

    This was extremely simple. The Range header is a HTTP feature to
    begin with so all this class does is tell urllib2 that the
    "206 Partial Content" response from the HTTP server is what we
    expected.
    """

    def http_error_206(self, req, fp, code, msg, hdrs):
        # 206 Partial Content Response
        r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
        r.code = code
        r.msg = msg
        return r

    def http_error_416(self, req, fp, code, msg, hdrs):
        # HTTP's Range Not Satisfiable error
        raise _RangeError('Requested Range Not Satisfiable')

def build_opener(ui, authinfo):
    # urllib cannot handle URLs with embedded user or passwd
    urlopener = url.opener(ui, authinfo)
    urlopener.add_handler(_HTTPRangeHandler())

    class statichttpvfs(vfsmod.abstractvfs):
        def __init__(self, base):
            self.base = base

        def __call__(self, path, mode='r', *args, **kw):
            if mode not in ('r', 'rb'):
                raise IOError('Permission denied')
            f = "/".join((self.base, urlreq.quote(path)))
            return httprangereader(f, urlopener)

        def join(self, path):
            if path:
                return pathutil.join(self.base, path)
            else:
                return self.base

    return statichttpvfs
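
# Editor's sketch (not part of this changeset): build_opener() returns a
# vfs *class* whose instances translate relative paths into range-capable
# HTTP readers. The URL is hypothetical:
#
#   vfsclass = build_opener(ui, authinfo)
#   vfs = vfsclass('http://example.com/repo/.hg')
#   fp = vfs('store/00changelog.i')   # httprangereader over that URL
#   header = fp.read(4)               # fetches only bytes 0-3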

class statichttppeer(localrepo.localpeer):
    def local(self):
        return None
    def canpush(self):
        return False

class statichttprepository(localrepo.localrepository):
    supported = localrepo.localrepository._basesupported

    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.root = path
        u = util.url(path.rstrip('/') + "/.hg")
        self.path, authinfo = u.authinfo()

        vfsclass = build_opener(ui, authinfo)
        self.vfs = vfsclass(self.path)
        self.cachevfs = vfsclass(self.vfs.join('cache'))
        self._phasedefaults = []

        self.names = namespaces.namespaces()
        self.filtername = None

        try:
            requirements = set(self.vfs.read(b'requires').splitlines())
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            requirements = set()

            # check if it is a non-empty old-style repository
            try:
                fp = self.vfs("00changelog.i")
                fp.read(1)
                fp.close()
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # we do not care about empty old-style repositories here
                msg = _("'%s' does not appear to be an hg repository") % path
                raise error.RepoError(msg)

        supportedrequirements = localrepo.gathersupportedrequirements(ui)
        localrepo.ensurerequirementsrecognized(requirements,
                                               supportedrequirements)
        localrepo.ensurerequirementscompatible(ui, requirements)

        # setup store
        self.store = store.store(requirements, self.path, vfsclass)
        self.spath = self.store.path
        self.svfs = self.store.opener
        self.sjoin = self.store.join
        self._filecache = {}
        self.requirements = requirements

        self.manifestlog = manifest.manifestlog(self.svfs, self)
        self.changelog = changelog.changelog(self.svfs)
        self._tags = None
        self.nodetagscache = None
        self._branchcaches = {}
        self._revbranchcache = None
        self.encodepats = None
        self.decodepats = None
        self._transref = None

    def _restrictcapabilities(self, caps):
        caps = super(statichttprepository, self)._restrictcapabilities(caps)
        return caps.difference(["pushkey"])

    def url(self):
        return self._url

    def local(self):
        return False

    def peer(self):
        return statichttppeer(self)

    def wlock(self, wait=True):
        raise error.LockUnavailable(0, _('lock not available'), 'lock',
                                    _('cannot lock static-http repository'))

    def lock(self, wait=True):
        raise error.Abort(_('cannot lock static-http repository'))

    def _writecaches(self):
        pass # statichttprepository are read only

def instance(ui, path, create, intents=None, createopts=None):
    if create:
        raise error.Abort(_('cannot create new static-http repository'))
    return statichttprepository(ui, path[7:])
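
# Editor's note (not part of this changeset): instance() is reached for
# URLs of the form 'static-http://...'; path[7:] strips the 7-character
# 'static-' prefix, so the repository class sees a plain http:// URL, e.g.
#
#   hg pull static-http://example.com/repo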