localrepo: move store() from store module...
Gregory Szorc
r39734:f4418760 default
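This changeset moves store construction out of the store module: makelocalrepository() now builds the store through a new module-level makestore() function in localrepo (added at line 570 of the new file). As a rough illustration of the dispatch the new function performs — the helper name selectstorekind and its string return values below are invented for this note; only the requirement checks mirror the patch — the store implementation is chosen from the repository's requirements like so:

    def selectstorekind(requirements):
        """Illustrative helper (not in the patch): mirrors makestore()'s dispatch."""
        if b'store' in requirements:
            if b'fncache' in requirements:
                # dotencode is only consulted once fncache is in use.
                return 'fncachestore, dotencode=%s' % (b'dotencode' in requirements)
            return 'encodedstore'
        return 'basicstore'

    print(selectstorekind({b'store', b'fncache', b'dotencode'}))  # fncachestore, dotencode=True
    print(selectstorekind({b'store'}))                            # encodedstore
    print(selectstorekind(set()))                                 # basicstore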
@@ -1,2703 +1,2714 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    derives a type suitable for representing that repository and returns an
    instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
-    store = storemod.store(requirements, storebasepath,
+    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))

    hgvfs.createmode = store.createmode

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    return localrepository(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        intents=intents)

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

+def makestore(requirements, path, vfstype):
+    """Construct a storage object for a repository."""
+    if b'store' in requirements:
+        if b'fncache' in requirements:
+            return storemod.fncachestore(path, vfstype,
+                                         b'dotencode' in requirements)
+
+        return storemod.encodedstore(path, vfstype)
+
+    return storemod.basicstore(path, vfstype)
+
570 @interfaceutil.implementer(repository.completelocalrepository)
581 @interfaceutil.implementer(repository.completelocalrepository)
571 class localrepository(object):
582 class localrepository(object):
572
583
573 # obsolete experimental requirements:
584 # obsolete experimental requirements:
574 # - manifestv2: An experimental new manifest format that allowed
585 # - manifestv2: An experimental new manifest format that allowed
575 # for stem compression of long paths. Experiment ended up not
586 # for stem compression of long paths. Experiment ended up not
576 # being successful (repository sizes went up due to worse delta
587 # being successful (repository sizes went up due to worse delta
577 # chains), and the code was deleted in 4.6.
588 # chains), and the code was deleted in 4.6.
578 supportedformats = {
589 supportedformats = {
579 'revlogv1',
590 'revlogv1',
580 'generaldelta',
591 'generaldelta',
581 'treemanifest',
592 'treemanifest',
582 REVLOGV2_REQUIREMENT,
593 REVLOGV2_REQUIREMENT,
583 SPARSEREVLOG_REQUIREMENT,
594 SPARSEREVLOG_REQUIREMENT,
584 }
595 }
585 _basesupported = supportedformats | {
596 _basesupported = supportedformats | {
586 'store',
597 'store',
587 'fncache',
598 'fncache',
588 'shared',
599 'shared',
589 'relshared',
600 'relshared',
590 'dotencode',
601 'dotencode',
591 'exp-sparse',
602 'exp-sparse',
592 'internal-phase'
603 'internal-phase'
593 }
604 }
594 openerreqs = {
605 openerreqs = {
595 'revlogv1',
606 'revlogv1',
596 'generaldelta',
607 'generaldelta',
597 'treemanifest',
608 'treemanifest',
598 }
609 }
599
610
600 # list of prefix for file which can be written without 'wlock'
611 # list of prefix for file which can be written without 'wlock'
601 # Extensions should extend this list when needed
612 # Extensions should extend this list when needed
602 _wlockfreeprefix = {
613 _wlockfreeprefix = {
603 # We migh consider requiring 'wlock' for the next
614 # We migh consider requiring 'wlock' for the next
604 # two, but pretty much all the existing code assume
615 # two, but pretty much all the existing code assume
605 # wlock is not needed so we keep them excluded for
616 # wlock is not needed so we keep them excluded for
606 # now.
617 # now.
607 'hgrc',
618 'hgrc',
608 'requires',
619 'requires',
609 # XXX cache is a complicatged business someone
620 # XXX cache is a complicatged business someone
610 # should investigate this in depth at some point
621 # should investigate this in depth at some point
611 'cache/',
622 'cache/',
612 # XXX shouldn't be dirstate covered by the wlock?
623 # XXX shouldn't be dirstate covered by the wlock?
613 'dirstate',
624 'dirstate',
614 # XXX bisect was still a bit too messy at the time
625 # XXX bisect was still a bit too messy at the time
615 # this changeset was introduced. Someone should fix
626 # this changeset was introduced. Someone should fix
616 # the remainig bit and drop this line
627 # the remainig bit and drop this line
617 'bisect.state',
628 'bisect.state',
618 }
629 }
619
630
620 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
631 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
621 supportedrequirements, sharedpath, store, cachevfs,
632 supportedrequirements, sharedpath, store, cachevfs,
622 intents=None):
633 intents=None):
623 """Create a new local repository instance.
634 """Create a new local repository instance.
624
635
625 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
636 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
626 or ``localrepo.makelocalrepository()`` for obtaining a new repository
637 or ``localrepo.makelocalrepository()`` for obtaining a new repository
627 object.
638 object.
628
639
629 Arguments:
640 Arguments:
630
641
631 baseui
642 baseui
632 ``ui.ui`` instance that ``ui`` argument was based off of.
643 ``ui.ui`` instance that ``ui`` argument was based off of.
633
644
634 ui
645 ui
635 ``ui.ui`` instance for use by the repository.
646 ``ui.ui`` instance for use by the repository.
636
647
637 origroot
648 origroot
638 ``bytes`` path to working directory root of this repository.
649 ``bytes`` path to working directory root of this repository.
639
650
640 wdirvfs
651 wdirvfs
641 ``vfs.vfs`` rooted at the working directory.
652 ``vfs.vfs`` rooted at the working directory.
642
653
643 hgvfs
654 hgvfs
644 ``vfs.vfs`` rooted at .hg/
655 ``vfs.vfs`` rooted at .hg/
645
656
646 requirements
657 requirements
647 ``set`` of bytestrings representing repository opening requirements.
658 ``set`` of bytestrings representing repository opening requirements.
648
659
649 supportedrequirements
660 supportedrequirements
650 ``set`` of bytestrings representing repository requirements that we
661 ``set`` of bytestrings representing repository requirements that we
651 know how to open. May be a supetset of ``requirements``.
662 know how to open. May be a supetset of ``requirements``.
652
663
653 sharedpath
664 sharedpath
654 ``bytes`` Defining path to storage base directory. Points to a
665 ``bytes`` Defining path to storage base directory. Points to a
655 ``.hg/`` directory somewhere.
666 ``.hg/`` directory somewhere.
656
667
657 store
668 store
658 ``store.basicstore`` (or derived) instance providing access to
669 ``store.basicstore`` (or derived) instance providing access to
659 versioned storage.
670 versioned storage.
660
671
661 cachevfs
672 cachevfs
662 ``vfs.vfs`` used for cache files.
673 ``vfs.vfs`` used for cache files.
663
674
664 intents
675 intents
665 ``set`` of system strings indicating what this repo will be used
676 ``set`` of system strings indicating what this repo will be used
666 for.
677 for.
667 """
678 """
668 self.baseui = baseui
679 self.baseui = baseui
669 self.ui = ui
680 self.ui = ui
670 self.origroot = origroot
681 self.origroot = origroot
671 # vfs rooted at working directory.
682 # vfs rooted at working directory.
672 self.wvfs = wdirvfs
683 self.wvfs = wdirvfs
673 self.root = wdirvfs.base
684 self.root = wdirvfs.base
674 # vfs rooted at .hg/. Used to access most non-store paths.
685 # vfs rooted at .hg/. Used to access most non-store paths.
675 self.vfs = hgvfs
686 self.vfs = hgvfs
676 self.path = hgvfs.base
687 self.path = hgvfs.base
677 self.requirements = requirements
688 self.requirements = requirements
678 self.supported = supportedrequirements
689 self.supported = supportedrequirements
679 self.sharedpath = sharedpath
690 self.sharedpath = sharedpath
680 self.store = store
691 self.store = store
681 self.cachevfs = cachevfs
692 self.cachevfs = cachevfs
682
693
683 self.filtername = None
694 self.filtername = None
684
695
685 if (self.ui.configbool('devel', 'all-warnings') or
696 if (self.ui.configbool('devel', 'all-warnings') or
686 self.ui.configbool('devel', 'check-locks')):
697 self.ui.configbool('devel', 'check-locks')):
687 self.vfs.audit = self._getvfsward(self.vfs.audit)
698 self.vfs.audit = self._getvfsward(self.vfs.audit)
688 # A list of callback to shape the phase if no data were found.
699 # A list of callback to shape the phase if no data were found.
689 # Callback are in the form: func(repo, roots) --> processed root.
700 # Callback are in the form: func(repo, roots) --> processed root.
690 # This list it to be filled by extension during repo setup
701 # This list it to be filled by extension during repo setup
691 self._phasedefaults = []
702 self._phasedefaults = []
692
703
693 color.setup(self.ui)
704 color.setup(self.ui)
694
705
695 self.spath = self.store.path
706 self.spath = self.store.path
696 self.svfs = self.store.vfs
707 self.svfs = self.store.vfs
697 self.sjoin = self.store.join
708 self.sjoin = self.store.join
698 if (self.ui.configbool('devel', 'all-warnings') or
709 if (self.ui.configbool('devel', 'all-warnings') or
699 self.ui.configbool('devel', 'check-locks')):
710 self.ui.configbool('devel', 'check-locks')):
700 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
711 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
701 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
712 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
702 else: # standard vfs
713 else: # standard vfs
703 self.svfs.audit = self._getsvfsward(self.svfs.audit)
714 self.svfs.audit = self._getsvfsward(self.svfs.audit)
704 self._applyopenerreqs()
715 self._applyopenerreqs()
705
716
706 self._dirstatevalidatewarned = False
717 self._dirstatevalidatewarned = False
707
718
708 self._branchcaches = {}
719 self._branchcaches = {}
709 self._revbranchcache = None
720 self._revbranchcache = None
710 self._filterpats = {}
721 self._filterpats = {}
711 self._datafilters = {}
722 self._datafilters = {}
712 self._transref = self._lockref = self._wlockref = None
723 self._transref = self._lockref = self._wlockref = None
713
724
714 # A cache for various files under .hg/ that tracks file changes,
725 # A cache for various files under .hg/ that tracks file changes,
715 # (used by the filecache decorator)
726 # (used by the filecache decorator)
716 #
727 #
717 # Maps a property name to its util.filecacheentry
728 # Maps a property name to its util.filecacheentry
718 self._filecache = {}
729 self._filecache = {}
719
730
720 # hold sets of revision to be filtered
731 # hold sets of revision to be filtered
721 # should be cleared when something might have changed the filter value:
732 # should be cleared when something might have changed the filter value:
722 # - new changesets,
733 # - new changesets,
723 # - phase change,
734 # - phase change,
724 # - new obsolescence marker,
735 # - new obsolescence marker,
725 # - working directory parent change,
736 # - working directory parent change,
726 # - bookmark changes
737 # - bookmark changes
727 self.filteredrevcache = {}
738 self.filteredrevcache = {}
728
739
729 # post-dirstate-status hooks
740 # post-dirstate-status hooks
730 self._postdsstatus = []
741 self._postdsstatus = []
731
742
732 # generic mapping between names and nodes
743 # generic mapping between names and nodes
733 self.names = namespaces.namespaces()
744 self.names = namespaces.namespaces()
734
745
735 # Key to signature value.
746 # Key to signature value.
736 self._sparsesignaturecache = {}
747 self._sparsesignaturecache = {}
737 # Signature to cached matcher instance.
748 # Signature to cached matcher instance.
738 self._sparsematchercache = {}
749 self._sparsematchercache = {}
739
750
740 def _getvfsward(self, origfunc):
751 def _getvfsward(self, origfunc):
741 """build a ward for self.vfs"""
752 """build a ward for self.vfs"""
742 rref = weakref.ref(self)
753 rref = weakref.ref(self)
743 def checkvfs(path, mode=None):
754 def checkvfs(path, mode=None):
744 ret = origfunc(path, mode=mode)
755 ret = origfunc(path, mode=mode)
745 repo = rref()
756 repo = rref()
746 if (repo is None
757 if (repo is None
747 or not util.safehasattr(repo, '_wlockref')
758 or not util.safehasattr(repo, '_wlockref')
748 or not util.safehasattr(repo, '_lockref')):
759 or not util.safehasattr(repo, '_lockref')):
749 return
760 return
750 if mode in (None, 'r', 'rb'):
761 if mode in (None, 'r', 'rb'):
751 return
762 return
752 if path.startswith(repo.path):
763 if path.startswith(repo.path):
753 # truncate name relative to the repository (.hg)
764 # truncate name relative to the repository (.hg)
754 path = path[len(repo.path) + 1:]
765 path = path[len(repo.path) + 1:]
755 if path.startswith('cache/'):
766 if path.startswith('cache/'):
756 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
767 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
757 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
768 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
758 if path.startswith('journal.'):
769 if path.startswith('journal.'):
759 # journal is covered by 'lock'
770 # journal is covered by 'lock'
760 if repo._currentlock(repo._lockref) is None:
771 if repo._currentlock(repo._lockref) is None:
761 repo.ui.develwarn('write with no lock: "%s"' % path,
772 repo.ui.develwarn('write with no lock: "%s"' % path,
762 stacklevel=2, config='check-locks')
773 stacklevel=2, config='check-locks')
763 elif repo._currentlock(repo._wlockref) is None:
774 elif repo._currentlock(repo._wlockref) is None:
764 # rest of vfs files are covered by 'wlock'
775 # rest of vfs files are covered by 'wlock'
765 #
776 #
766 # exclude special files
777 # exclude special files
767 for prefix in self._wlockfreeprefix:
778 for prefix in self._wlockfreeprefix:
768 if path.startswith(prefix):
779 if path.startswith(prefix):
769 return
780 return
770 repo.ui.develwarn('write with no wlock: "%s"' % path,
781 repo.ui.develwarn('write with no wlock: "%s"' % path,
771 stacklevel=2, config='check-locks')
782 stacklevel=2, config='check-locks')
772 return ret
783 return ret
773 return checkvfs
784 return checkvfs
774
785
775 def _getsvfsward(self, origfunc):
786 def _getsvfsward(self, origfunc):
776 """build a ward for self.svfs"""
787 """build a ward for self.svfs"""
777 rref = weakref.ref(self)
788 rref = weakref.ref(self)
778 def checksvfs(path, mode=None):
789 def checksvfs(path, mode=None):
779 ret = origfunc(path, mode=mode)
790 ret = origfunc(path, mode=mode)
780 repo = rref()
791 repo = rref()
781 if repo is None or not util.safehasattr(repo, '_lockref'):
792 if repo is None or not util.safehasattr(repo, '_lockref'):
782 return
793 return
783 if mode in (None, 'r', 'rb'):
794 if mode in (None, 'r', 'rb'):
784 return
795 return
785 if path.startswith(repo.sharedpath):
796 if path.startswith(repo.sharedpath):
786 # truncate name relative to the repository (.hg)
797 # truncate name relative to the repository (.hg)
787 path = path[len(repo.sharedpath) + 1:]
798 path = path[len(repo.sharedpath) + 1:]
788 if repo._currentlock(repo._lockref) is None:
799 if repo._currentlock(repo._lockref) is None:
789 repo.ui.develwarn('write with no lock: "%s"' % path,
800 repo.ui.develwarn('write with no lock: "%s"' % path,
790 stacklevel=3)
801 stacklevel=3)
791 return ret
802 return ret
792 return checksvfs
803 return checksvfs
793
804
794 def close(self):
805 def close(self):
795 self._writecaches()
806 self._writecaches()
796
807
797 def _writecaches(self):
808 def _writecaches(self):
798 if self._revbranchcache:
809 if self._revbranchcache:
799 self._revbranchcache.write()
810 self._revbranchcache.write()
800
811
801 def _restrictcapabilities(self, caps):
812 def _restrictcapabilities(self, caps):
802 if self.ui.configbool('experimental', 'bundle2-advertise'):
813 if self.ui.configbool('experimental', 'bundle2-advertise'):
803 caps = set(caps)
814 caps = set(caps)
804 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
815 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
805 role='client'))
816 role='client'))
806 caps.add('bundle2=' + urlreq.quote(capsblob))
817 caps.add('bundle2=' + urlreq.quote(capsblob))
807 return caps
818 return caps
808
819
809 def _applyopenerreqs(self):
820 def _applyopenerreqs(self):
810 self.svfs.options = dict((r, 1) for r in self.requirements
821 self.svfs.options = dict((r, 1) for r in self.requirements
811 if r in self.openerreqs)
822 if r in self.openerreqs)
812 # experimental config: format.chunkcachesize
823 # experimental config: format.chunkcachesize
813 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
824 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
814 if chunkcachesize is not None:
825 if chunkcachesize is not None:
815 self.svfs.options['chunkcachesize'] = chunkcachesize
826 self.svfs.options['chunkcachesize'] = chunkcachesize
816 # experimental config: format.manifestcachesize
827 # experimental config: format.manifestcachesize
817 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
828 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
818 if manifestcachesize is not None:
829 if manifestcachesize is not None:
819 self.svfs.options['manifestcachesize'] = manifestcachesize
830 self.svfs.options['manifestcachesize'] = manifestcachesize
820 deltabothparents = self.ui.configbool('storage',
831 deltabothparents = self.ui.configbool('storage',
821 'revlog.optimize-delta-parent-choice')
832 'revlog.optimize-delta-parent-choice')
822 self.svfs.options['deltabothparents'] = deltabothparents
833 self.svfs.options['deltabothparents'] = deltabothparents
823 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
834 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
824 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
835 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
825 if 0 <= chainspan:
836 if 0 <= chainspan:
826 self.svfs.options['maxdeltachainspan'] = chainspan
837 self.svfs.options['maxdeltachainspan'] = chainspan
827 mmapindexthreshold = self.ui.configbytes('experimental',
838 mmapindexthreshold = self.ui.configbytes('experimental',
828 'mmapindexthreshold')
839 'mmapindexthreshold')
829 if mmapindexthreshold is not None:
840 if mmapindexthreshold is not None:
830 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
841 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
831 withsparseread = self.ui.configbool('experimental', 'sparse-read')
842 withsparseread = self.ui.configbool('experimental', 'sparse-read')
832 srdensitythres = float(self.ui.config('experimental',
843 srdensitythres = float(self.ui.config('experimental',
833 'sparse-read.density-threshold'))
844 'sparse-read.density-threshold'))
834 srmingapsize = self.ui.configbytes('experimental',
845 srmingapsize = self.ui.configbytes('experimental',
835 'sparse-read.min-gap-size')
846 'sparse-read.min-gap-size')
836 self.svfs.options['with-sparse-read'] = withsparseread
847 self.svfs.options['with-sparse-read'] = withsparseread
837 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
848 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
838 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
849 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
839 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
850 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
840 self.svfs.options['sparse-revlog'] = sparserevlog
851 self.svfs.options['sparse-revlog'] = sparserevlog
841 if sparserevlog:
852 if sparserevlog:
842 self.svfs.options['generaldelta'] = True
853 self.svfs.options['generaldelta'] = True
843 maxchainlen = None
854 maxchainlen = None
844 if sparserevlog:
855 if sparserevlog:
845 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
856 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
846 # experimental config: format.maxchainlen
857 # experimental config: format.maxchainlen
847 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
858 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
848 if maxchainlen is not None:
859 if maxchainlen is not None:
849 self.svfs.options['maxchainlen'] = maxchainlen
860 self.svfs.options['maxchainlen'] = maxchainlen
850
861
851 for r in self.requirements:
862 for r in self.requirements:
852 if r.startswith('exp-compression-'):
863 if r.startswith('exp-compression-'):
853 self.svfs.options['compengine'] = r[len('exp-compression-'):]
864 self.svfs.options['compengine'] = r[len('exp-compression-'):]
854
865
855 # TODO move "revlogv2" to openerreqs once finalized.
866 # TODO move "revlogv2" to openerreqs once finalized.
856 if REVLOGV2_REQUIREMENT in self.requirements:
867 if REVLOGV2_REQUIREMENT in self.requirements:
857 self.svfs.options['revlogv2'] = True
868 self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with a reference
    # cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected it by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
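    # Worked example for _checknested (editor's sketch; the paths are
    # hypothetical): with root = '/repo' and path = '/repo/sub/deeper/f',
    # subpath is 'sub/deeper/f'. The loop probes 'sub/deeper/f', then
    # 'sub/deeper', then 'sub'; if 'sub' is in ctx.substate the check is
    # delegated to that subrepo as sub.checknested('deeper/f'), otherwise
    # the path is rejected.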

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
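    # Usage sketch for __getitem__ (editor's note; behavior as read from
    # the code above): repo[None] yields the working-directory context,
    # repo[0] a changectx, and repo[0:3] a list of changectxs with
    # filtered revisions already skipped.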

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
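    # Example call (editor's sketch; the branch name is hypothetical):
    #
    #   for rev in repo.revs('branch(%s) and not public()', 'default'):
    #       ...
    #
    # where %s is escaped by revsetlang.formatspec before parsing.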

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
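    # Shape of the _findtags() return value (editor's illustration with
    # hypothetical entries):
    #
    #   ({'tip': <node>, '1.0': <node>}, {'1.0': 'global', 'wip': 'local'})
    #
    # with tag names re-encoded into the local encoding as described above.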

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
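    # known() maps each binary node to a boolean (editor's illustration):
    # for [node_in_repo, unknown_node, filtered_node] it would return
    # [True, False, False]; peers use this during discovery to learn which
    # of their nodes the local repo already has.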

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
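    # The patterns above come from the [encode]/[decode] hgrc sections; the
    # hgrc documentation gives examples along these lines (illustrative):
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   *.gz = pipe: gzip
    #
    # A command of '!' disables filtering for that pattern.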

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
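    # Flag semantics in wwrite (editor's note): 'l' writes ``data`` as the
    # target of a symlink, 'x' sets the executable bit, and an empty flags
    # string clears both, e.g. repo.wwrite('build.sh', data, 'x').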

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective, so we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with cases where a transaction
        # exposes new heads without a changegroup being involved (eg: phase
        # movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new or changed or deleted tags). In addition the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
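        # Example contents of .hg/changes/tags.changes (editor's
        # illustration; the nodes, tag names, and line ordering here are
        # made up):
        #
        #   +A 1212121212121212121212121212121212121212 release-1.0
        #   -M 3434343434343434343434343434343434343434 moving-tag
        #   +M 5656565656565656565656565656565656565656 moving-tag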
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here; as we do it only once,
                # building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a
            # hacky path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be
        # warm when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
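    # undoname() rewrites each journal name to its undo counterpart
    # (editor's illustration): 'journal.dirstate' -> 'undo.dirstate',
    # 'journal.phaseroots' -> 'undo.phaseroots'. This is how 'hg rollback'
    # later finds the files saved by the last transaction.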
1629
1640
1630 @unfilteredmethod
1641 @unfilteredmethod
1631 def _writejournal(self, desc):
1642 def _writejournal(self, desc):
1632 self.dirstate.savebackup(None, 'journal.dirstate')
1643 self.dirstate.savebackup(None, 'journal.dirstate')
1633 narrowspec.savebackup(self, 'journal.narrowspec')
1644 narrowspec.savebackup(self, 'journal.narrowspec')
1634 self.vfs.write("journal.branch",
1645 self.vfs.write("journal.branch",
1635 encoding.fromlocal(self.dirstate.branch()))
1646 encoding.fromlocal(self.dirstate.branch()))
1636 self.vfs.write("journal.desc",
1647 self.vfs.write("journal.desc",
1637 "%d\n%s\n" % (len(self), desc))
1648 "%d\n%s\n" % (len(self), desc))
1638 self.vfs.write("journal.bookmarks",
1649 self.vfs.write("journal.bookmarks",
1639 self.vfs.tryread("bookmarks"))
1650 self.vfs.tryread("bookmarks"))
1640 self.svfs.write("journal.phaseroots",
1651 self.svfs.write("journal.phaseroots",
1641 self.svfs.tryread("phaseroots"))
1652 self.svfs.tryread("phaseroots"))
1642
1653
1643 def recover(self):
1654 def recover(self):
1644 with self.lock():
1655 with self.lock():
1645 if self.svfs.exists("journal"):
1656 if self.svfs.exists("journal"):
1646 self.ui.status(_("rolling back interrupted transaction\n"))
1657 self.ui.status(_("rolling back interrupted transaction\n"))
1647 vfsmap = {'': self.svfs,
1658 vfsmap = {'': self.svfs,
1648 'plain': self.vfs,}
1659 'plain': self.vfs,}
1649 transaction.rollback(self.svfs, vfsmap, "journal",
1660 transaction.rollback(self.svfs, vfsmap, "journal",
1650 self.ui.warn,
1661 self.ui.warn,
1651 checkambigfiles=_cachedfiles)
1662 checkambigfiles=_cachedfiles)
1652 self.invalidate()
1663 self.invalidate()
1653 return True
1664 return True
1654 else:
1665 else:
1655 self.ui.warn(_("no interrupted transaction available\n"))
1666 self.ui.warn(_("no interrupted transaction available\n"))
1656 return False
1667 return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
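
    # Illustrative sketch (not part of the original source): a dry-run
    # rollback, assuming `repo` is an existing localrepository instance:
    #
    #   if repo.rollback(dryrun=True) == 0:
    #       pass  # a real rollback would have something to undo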

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater
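
    # Illustrative sketch (not part of the original source): an extension
    # could augment the cache updater by wrapping this method, e.g. via
    # extensions.wrapfunction; `myextracachewarming` is a hypothetical helper:
    #
    #   def _buildcacheupdater(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #       def wrapped(tr):
    #           updater(tr)
    #           myextracachewarming(repo, tr)
    #       return wrapped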

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
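
    # Illustrative sketch (not part of the original source): deferring work
    # until every lock is dropped, assuming `repo` currently holds a lock:
    #
    #   def notify():
    #       repo.ui.status(_('all locks released\n'))
    #   repo._afterlock(notify)  # runs immediately if nothing is locked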

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock, as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
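
    # Illustrative sketch (not part of the original source): the documented
    # lock ordering, 'wlock' before 'lock', assuming an existing `repo`:
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           pass  # safe to touch both the working copy and the store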

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            #  0 --- 1 --- 3   rev1 changes file foo
            #   \       /      rev2 renames foo to bar and changes it
            #    \- 2 -/       rev3 should have bar with all changes and
            #                  should record that bar descends from
            #                  bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            #  0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /      merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4    as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
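
    # Illustrative sketch (not part of the original source): for a rename of
    # foo to bar, the filelog metadata written above would look roughly like
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-hex-digit node of foo>'}
    #
    # with fparent1 set to nullid so readers know to consult the copy data.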

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may already have been stripped before
            # the hook is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
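
    # Illustrative sketch (not part of the original source): committing all
    # pending changes programmatically, assuming an existing `repo`:
    #
    #   node = repo.commit(text='update docs', user='alice <a@example.com>')
    #   if node is None:
    #       repo.ui.status(_('nothing changed\n'))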

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
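
    # Illustrative sketch (not part of the original source): an extension
    # registering a post-dirstate-status callback, assuming `repo` exists:
    #
    #   def fixup(wctx, status):
    #       wctx.repo().ui.debug('%d modified files\n' % len(status.modified))
    #   repo.addpostdsstatus(fixup)  # runs on the next status with fixups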

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
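
    # Illustrative sketch (not part of the original source): listing the open
    # heads of the 'default' branch, assuming an existing `repo`:
    #
    #   for node in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % short(node))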

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
2472
2483
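# Worked example of the sampling above (illustrative revs): for one
# (top, bottom) pair, between() walks first parents from `top` and
# records a node whenever the step count i equals f, with f doubling,
# i.e. at distances 1, 2, 4, 8, ... from top. On a linear history with
# top at rev 10 and bottom at rev 0, the recorded nodes are those of
# revs 9, 8, 6 and 2 -- the exponentially spaced samples the legacy
# 'between' wire command used during discovery.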
2473 def checkpush(self, pushop):
2484 def checkpush(self, pushop):
2474 """Extensions can override this function if additional checks have
2485 """Extensions can override this function if additional checks have
2475 to be performed before pushing, or call it if they override the push
2486 to be performed before pushing, or call it if they override the push
2476 command.
2487 command.
2477 """
2488 """
2478
2489
2479 @unfilteredpropertycache
2490 @unfilteredpropertycache
2480 def prepushoutgoinghooks(self):
2491 def prepushoutgoinghooks(self):
2481 """Return util.hooks consists of a pushop with repo, remote, outgoing
2492 """Return util.hooks consists of a pushop with repo, remote, outgoing
2482 methods, which are called before pushing changesets.
2493 methods, which are called before pushing changesets.
2483 """
2494 """
2484 return util.hooks()
2495 return util.hooks()
2485
2496
2486 def pushkey(self, namespace, key, old, new):
2497 def pushkey(self, namespace, key, old, new):
2487 try:
2498 try:
2488 tr = self.currenttransaction()
2499 tr = self.currenttransaction()
2489 hookargs = {}
2500 hookargs = {}
2490 if tr is not None:
2501 if tr is not None:
2491 hookargs.update(tr.hookargs)
2502 hookargs.update(tr.hookargs)
2492 hookargs = pycompat.strkwargs(hookargs)
2503 hookargs = pycompat.strkwargs(hookargs)
2493 hookargs[r'namespace'] = namespace
2504 hookargs[r'namespace'] = namespace
2494 hookargs[r'key'] = key
2505 hookargs[r'key'] = key
2495 hookargs[r'old'] = old
2506 hookargs[r'old'] = old
2496 hookargs[r'new'] = new
2507 hookargs[r'new'] = new
2497 self.hook('prepushkey', throw=True, **hookargs)
2508 self.hook('prepushkey', throw=True, **hookargs)
2498 except error.HookAbort as exc:
2509 except error.HookAbort as exc:
2499 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2510 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2500 if exc.hint:
2511 if exc.hint:
2501 self.ui.write_err(_("(%s)\n") % exc.hint)
2512 self.ui.write_err(_("(%s)\n") % exc.hint)
2502 return False
2513 return False
2503 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2514 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2504 ret = pushkey.push(self, namespace, key, old, new)
2515 ret = pushkey.push(self, namespace, key, old, new)
2505 def runhook():
2516 def runhook():
2506 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2517 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2507 ret=ret)
2518 ret=ret)
2508 self._afterlock(runhook)
2519 self._afterlock(runhook)
2509 return ret
2520 return ret
2510
2521
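# Illustrative flow (namespace and values hypothetical): pushing a
# bookmark through pushkey fires 'prepushkey' first -- a HookAbort there
# vetoes the update and makes pushkey() return False -- and schedules
# the 'pushkey' hook to run once the lock is released.
#
#   ok = repo.pushkey('bookmarks', 'feature', oldhex, newhex)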
2511 def listkeys(self, namespace):
2522 def listkeys(self, namespace):
2512 self.hook('prelistkeys', throw=True, namespace=namespace)
2523 self.hook('prelistkeys', throw=True, namespace=namespace)
2513 self.ui.debug('listing keys for "%s"\n' % namespace)
2524 self.ui.debug('listing keys for "%s"\n' % namespace)
2514 values = pushkey.list(self, namespace)
2525 values = pushkey.list(self, namespace)
2515 self.hook('listkeys', namespace=namespace, values=values)
2526 self.hook('listkeys', namespace=namespace, values=values)
2516 return values
2527 return values
2517
2528
2518 def debugwireargs(self, one, two, three=None, four=None, five=None):
2529 def debugwireargs(self, one, two, three=None, four=None, five=None):
2519 '''used to test argument passing over the wire'''
2530 '''used to test argument passing over the wire'''
2520 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2531 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2521 pycompat.bytestr(four),
2532 pycompat.bytestr(four),
2522 pycompat.bytestr(five))
2533 pycompat.bytestr(five))
2523
2534
2524 def savecommitmessage(self, text):
2535 def savecommitmessage(self, text):
2525 fp = self.vfs('last-message.txt', 'wb')
2536 fp = self.vfs('last-message.txt', 'wb')
2526 try:
2537 try:
2527 fp.write(text)
2538 fp.write(text)
2528 finally:
2539 finally:
2529 fp.close()
2540 fp.close()
2530 return self.pathto(fp.name[len(self.root) + 1:])
2541 return self.pathto(fp.name[len(self.root) + 1:])
2531
2542
2532 # used to avoid circular references so destructors work
2543 # used to avoid circular references so destructors work
2533 def aftertrans(files):
2544 def aftertrans(files):
2534 renamefiles = [tuple(t) for t in files]
2545 renamefiles = [tuple(t) for t in files]
2535 def a():
2546 def a():
2536 for vfs, src, dest in renamefiles:
2547 for vfs, src, dest in renamefiles:
2537 # if src and dest refer to the same file, vfs.rename is a no-op,
2548 # if src and dest refer to the same file, vfs.rename is a no-op,
2538 # leaving both src and dest on disk. delete dest to make sure
2549 # leaving both src and dest on disk. delete dest to make sure
2539 # the rename cannot be such a no-op.
2550 # the rename cannot be such a no-op.
2540 vfs.tryunlink(dest)
2551 vfs.tryunlink(dest)
2541 try:
2552 try:
2542 vfs.rename(src, dest)
2553 vfs.rename(src, dest)
2543 except OSError: # journal file does not yet exist
2554 except OSError: # journal file does not yet exist
2544 pass
2555 pass
2545 return a
2556 return a
2546
2557
2547 def undoname(fn):
2558 def undoname(fn):
2548 base, name = os.path.split(fn)
2559 base, name = os.path.split(fn)
2549 assert name.startswith('journal')
2560 assert name.startswith('journal')
2550 return os.path.join(base, name.replace('journal', 'undo', 1))
2561 return os.path.join(base, name.replace('journal', 'undo', 1))
2551
2562
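# Example: undoname('.hg/store/journal.phaseroots') returns
# '.hg/store/undo.phaseroots'; only the first 'journal' in the basename
# is replaced.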
2552 def instance(ui, path, create, intents=None, createopts=None):
2563 def instance(ui, path, create, intents=None, createopts=None):
2553 localpath = util.urllocalpath(path)
2564 localpath = util.urllocalpath(path)
2554 if create:
2565 if create:
2555 createrepository(ui, localpath, createopts=createopts)
2566 createrepository(ui, localpath, createopts=createopts)
2556
2567
2557 return makelocalrepository(ui, localpath, intents=intents)
2568 return makelocalrepository(ui, localpath, intents=intents)
2558
2569
2559 def islocal(path):
2570 def islocal(path):
2560 return True
2571 return True
2561
2572
2562 def newreporequirements(ui, createopts=None):
2573 def newreporequirements(ui, createopts=None):
2563 """Determine the set of requirements for a new local repository.
2574 """Determine the set of requirements for a new local repository.
2564
2575
2565 Extensions can wrap this function to specify custom requirements for
2576 Extensions can wrap this function to specify custom requirements for
2566 new repositories.
2577 new repositories.
2567 """
2578 """
2568 createopts = createopts or {}
2579 createopts = createopts or {}
2569
2580
2570 requirements = {'revlogv1'}
2581 requirements = {'revlogv1'}
2571 if ui.configbool('format', 'usestore'):
2582 if ui.configbool('format', 'usestore'):
2572 requirements.add('store')
2583 requirements.add('store')
2573 if ui.configbool('format', 'usefncache'):
2584 if ui.configbool('format', 'usefncache'):
2574 requirements.add('fncache')
2585 requirements.add('fncache')
2575 if ui.configbool('format', 'dotencode'):
2586 if ui.configbool('format', 'dotencode'):
2576 requirements.add('dotencode')
2587 requirements.add('dotencode')
2577
2588
2578 compengine = ui.config('experimental', 'format.compression')
2589 compengine = ui.config('experimental', 'format.compression')
2579 if compengine not in util.compengines:
2590 if compengine not in util.compengines:
2580 raise error.Abort(_('compression engine %s defined by '
2591 raise error.Abort(_('compression engine %s defined by '
2581 'experimental.format.compression not available') %
2592 'experimental.format.compression not available') %
2582 compengine,
2593 compengine,
2583 hint=_('run "hg debuginstall" to list available '
2594 hint=_('run "hg debuginstall" to list available '
2584 'compression engines'))
2595 'compression engines'))
2585
2596
2586 # zlib is the historical default and doesn't need an explicit requirement.
2597 # zlib is the historical default and doesn't need an explicit requirement.
2587 if compengine != 'zlib':
2598 if compengine != 'zlib':
2588 requirements.add('exp-compression-%s' % compengine)
2599 requirements.add('exp-compression-%s' % compengine)
2589
2600
2590 if scmutil.gdinitconfig(ui):
2601 if scmutil.gdinitconfig(ui):
2591 requirements.add('generaldelta')
2602 requirements.add('generaldelta')
2592 if ui.configbool('experimental', 'treemanifest'):
2603 if ui.configbool('experimental', 'treemanifest'):
2593 requirements.add('treemanifest')
2604 requirements.add('treemanifest')
2594 # experimental config: format.sparse-revlog
2605 # experimental config: format.sparse-revlog
2595 if ui.configbool('format', 'sparse-revlog'):
2606 if ui.configbool('format', 'sparse-revlog'):
2596 requirements.add(SPARSEREVLOG_REQUIREMENT)
2607 requirements.add(SPARSEREVLOG_REQUIREMENT)
2597
2608
2598 revlogv2 = ui.config('experimental', 'revlogv2')
2609 revlogv2 = ui.config('experimental', 'revlogv2')
2599 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2610 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2600 requirements.remove('revlogv1')
2611 requirements.remove('revlogv1')
2601 # generaldelta is implied by revlogv2.
2612 # generaldelta is implied by revlogv2.
2602 requirements.discard('generaldelta')
2613 requirements.discard('generaldelta')
2603 requirements.add(REVLOGV2_REQUIREMENT)
2614 requirements.add(REVLOGV2_REQUIREMENT)
2604 # experimental config: format.internal-phase
2615 # experimental config: format.internal-phase
2605 if ui.configbool('format', 'internal-phase'):
2616 if ui.configbool('format', 'internal-phase'):
2606 requirements.add('internal-phase')
2617 requirements.add('internal-phase')
2607
2618
2608 if createopts.get('narrowfiles'):
2619 if createopts.get('narrowfiles'):
2609 requirements.add(repository.NARROW_REQUIREMENT)
2620 requirements.add(repository.NARROW_REQUIREMENT)
2610
2621
2611 return requirements
2622 return requirements
2612
2623
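# A minimal sketch of the extension hook named in the docstring above;
# the extension module and the 'exp-myfeature' requirement are
# hypothetical, not part of Mercurial.
from mercurial import extensions, localrepo

def _reqs(orig, ui, createopts=None):
    # delegate to the wrapped function, then add our own requirement
    reqs = orig(ui, createopts=createopts)
    reqs.add('exp-myfeature')
    return reqs

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements', _reqs)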
2613 def filterknowncreateopts(ui, createopts):
2624 def filterknowncreateopts(ui, createopts):
2614 """Filters a dict of repo creation options against options that are known.
2625 """Filters a dict of repo creation options against options that are known.
2615
2626
2616 Receives a dict of repo creation options and returns a dict of those
2627 Receives a dict of repo creation options and returns a dict of those
2617 options that we don't know how to handle.
2628 options that we don't know how to handle.
2618
2629
2619 This function is called as part of repository creation. If the
2630 This function is called as part of repository creation. If the
2620 returned dict contains any items, repository creation will not
2631 returned dict contains any items, repository creation will not
2621 be allowed, as it means there was a request to create a repository
2632 be allowed, as it means there was a request to create a repository
2622 with options not recognized by loaded code.
2633 with options not recognized by loaded code.
2623
2634
2624 Extensions can wrap this function to filter out creation options
2635 Extensions can wrap this function to filter out creation options
2625 they know how to handle.
2636 they know how to handle.
2626 """
2637 """
2627 known = {'narrowfiles'}
2638 known = {'narrowfiles'}
2628
2639
2629 return {k: v for k, v in createopts.items() if k not in known}
2640 return {k: v for k, v in createopts.items() if k not in known}
2630
2641
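# Sketch of the wrapping pattern the docstring describes: an extension
# that understands a hypothetical 'myopt' creation option removes it
# from the dict of unknown options so createrepository() can proceed.
def _filteropts(orig, ui, createopts):
    unknown = orig(ui, createopts)
    unknown.pop('myopt', None)  # 'myopt' is ours; everything else stays
    return unknown

# registered with:
#   extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filteropts)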
2631 def createrepository(ui, path, createopts=None):
2642 def createrepository(ui, path, createopts=None):
2632 """Create a new repository in a vfs.
2643 """Create a new repository in a vfs.
2633
2644
2634 ``path`` path to the new repo's working directory.
2645 ``path`` path to the new repo's working directory.
2635 ``createopts`` options for the new repository.
2646 ``createopts`` options for the new repository.
2636 """
2647 """
2637 createopts = createopts or {}
2648 createopts = createopts or {}
2638
2649
2639 unknownopts = filterknowncreateopts(ui, createopts)
2650 unknownopts = filterknowncreateopts(ui, createopts)
2640
2651
2641 if not isinstance(unknownopts, dict):
2652 if not isinstance(unknownopts, dict):
2642 raise error.ProgrammingError('filterknowncreateopts() did not return '
2653 raise error.ProgrammingError('filterknowncreateopts() did not return '
2643 'a dict')
2654 'a dict')
2644
2655
2645 if unknownopts:
2656 if unknownopts:
2646 raise error.Abort(_('unable to create repository because of unknown '
2657 raise error.Abort(_('unable to create repository because of unknown '
2647 'creation option: %s') %
2658 'creation option: %s') %
2648 ', '.join(sorted(unknownopts)),
2659 ', '.join(sorted(unknownopts)),
2649 hint=_('is a required extension not loaded?'))
2660 hint=_('is a required extension not loaded?'))
2650
2661
2651 requirements = newreporequirements(ui, createopts=createopts)
2662 requirements = newreporequirements(ui, createopts=createopts)
2652
2663
2653 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2664 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2654 if not wdirvfs.exists():
2665 if not wdirvfs.exists():
2655 wdirvfs.makedirs()
2666 wdirvfs.makedirs()
2656
2667
2657 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2668 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2658 if hgvfs.exists():
2669 if hgvfs.exists():
2659 raise error.RepoError(_('repository %s already exists') % path)
2670 raise error.RepoError(_('repository %s already exists') % path)
2660
2671
2661 hgvfs.makedir(notindexed=True)
2672 hgvfs.makedir(notindexed=True)
2662
2673
2663 if b'store' in requirements:
2674 if b'store' in requirements:
2664 hgvfs.mkdir(b'store')
2675 hgvfs.mkdir(b'store')
2665
2676
2666 # We create an invalid changelog outside the store so very old
2677 # We create an invalid changelog outside the store so very old
2667 # Mercurial versions (which didn't know about the requirements
2678 # Mercurial versions (which didn't know about the requirements
2668 # file) encounter an error on reading the changelog. This
2679 # file) encounter an error on reading the changelog. This
2669 # effectively locks out old clients and prevents them from
2680 # effectively locks out old clients and prevents them from
2670 # mucking with a repo in an unknown format.
2681 # mucking with a repo in an unknown format.
2671 #
2682 #
2672 # The revlog header has version 2, which won't be recognized by
2683 # The revlog header has version 2, which won't be recognized by
2673 # such old clients.
2684 # such old clients.
2674 hgvfs.append(b'00changelog.i',
2685 hgvfs.append(b'00changelog.i',
2675 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2686 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2676 b'layout')
2687 b'layout')
2677
2688
2678 scmutil.writerequires(hgvfs, requirements)
2689 scmutil.writerequires(hgvfs, requirements)
2679
2690
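# Resulting on-disk layout (sketch, assuming the default 'store'
# requirement from newreporequirements() is enabled):
#
#   <path>/.hg/requires        # one requirement per line
#   <path>/.hg/00changelog.i   # dummy v2 revlog that locks out old clients
#   <path>/.hg/store/          # where the real revlogs will live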
2680 def poisonrepository(repo):
2691 def poisonrepository(repo):
2681 """Poison a repository instance so it can no longer be used."""
2692 """Poison a repository instance so it can no longer be used."""
2682 # Perform any cleanup on the instance.
2693 # Perform any cleanup on the instance.
2683 repo.close()
2694 repo.close()
2684
2695
2685 # Our strategy is to replace the type of the object with one that
2696 # Our strategy is to replace the type of the object with one that
2686 # has all attribute lookups result in error.
2697 # has all attribute lookups result in error.
2687 #
2698 #
2688 # But we have to allow the close() method because some constructors
2699 # But we have to allow the close() method because some constructors
2689 # of repos call close() on repo references.
2700 # of repos call close() on repo references.
2690 class poisonedrepository(object):
2701 class poisonedrepository(object):
2691 def __getattribute__(self, item):
2702 def __getattribute__(self, item):
2692 if item == r'close':
2703 if item == r'close':
2693 return object.__getattribute__(self, item)
2704 return object.__getattribute__(self, item)
2694
2705
2695 raise error.ProgrammingError('repo instances should not be used '
2706 raise error.ProgrammingError('repo instances should not be used '
2696 'after unshare')
2707 'after unshare')
2697
2708
2698 def close(self):
2709 def close(self):
2699 pass
2710 pass
2700
2711
2701 # We may have a repoview, which intercepts __setattr__. So be sure
2712 # We may have a repoview, which intercepts __setattr__. So be sure
2702 # we operate at the lowest level possible.
2713 # we operate at the lowest level possible.
2703 object.__setattr__(repo, r'__class__', poisonedrepository)
2714 object.__setattr__(repo, r'__class__', poisonedrepository)
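# Usage sketch: after an unshare, callers poison the stale instance so
# leftover references fail loudly instead of touching the wrong repo.
#
#   poisonrepository(repo)
#   repo.changelog   # raises error.ProgrammingError
#   repo.close()     # still permitted; a harmless no-op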
@@ -1,225 +1,224 b''
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import errno
12 import errno
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 changelog,
16 changelog,
17 error,
17 error,
18 localrepo,
18 localrepo,
19 manifest,
19 manifest,
20 namespaces,
20 namespaces,
21 pathutil,
21 pathutil,
22 store,
23 url,
22 url,
24 util,
23 util,
25 vfs as vfsmod,
24 vfs as vfsmod,
26 )
25 )
27
26
28 urlerr = util.urlerr
27 urlerr = util.urlerr
29 urlreq = util.urlreq
28 urlreq = util.urlreq
30
29
31 class httprangereader(object):
30 class httprangereader(object):
32 def __init__(self, url, opener):
31 def __init__(self, url, opener):
33 # we assume opener has HTTPRangeHandler
32 # we assume opener has HTTPRangeHandler
34 self.url = url
33 self.url = url
35 self.pos = 0
34 self.pos = 0
36 self.opener = opener
35 self.opener = opener
37 self.name = url
36 self.name = url
38
37
39 def __enter__(self):
38 def __enter__(self):
40 return self
39 return self
41
40
42 def __exit__(self, exc_type, exc_value, traceback):
41 def __exit__(self, exc_type, exc_value, traceback):
43 self.close()
42 self.close()
44
43
45 def seek(self, pos):
44 def seek(self, pos):
46 self.pos = pos
45 self.pos = pos
47 def read(self, bytes=None):
46 def read(self, bytes=None):
48 req = urlreq.request(self.url)
47 req = urlreq.request(self.url)
49 end = ''
48 end = ''
50 if bytes:
49 if bytes:
51 end = self.pos + bytes - 1
50 end = self.pos + bytes - 1
52 if self.pos or end:
51 if self.pos or end:
53 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
52 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
54
53
55 try:
54 try:
56 f = self.opener.open(req)
55 f = self.opener.open(req)
57 data = f.read()
56 data = f.read()
58 code = f.code
57 code = f.code
59 except urlerr.httperror as inst:
58 except urlerr.httperror as inst:
60 num = inst.code == 404 and errno.ENOENT or None
59 num = inst.code == 404 and errno.ENOENT or None
61 raise IOError(num, inst)
60 raise IOError(num, inst)
62 except urlerr.urlerror as inst:
61 except urlerr.urlerror as inst:
63 raise IOError(None, inst.reason[1])
62 raise IOError(None, inst.reason[1])
64
63
65 if code == 200:
64 if code == 200:
66 # HTTPRangeHandler does nothing if remote does not support
65 # HTTPRangeHandler does nothing if remote does not support
67 # Range headers and returns the full entity. Let's slice it.
66 # Range headers and returns the full entity. Let's slice it.
68 if bytes:
67 if bytes:
69 data = data[self.pos:self.pos + bytes]
68 data = data[self.pos:self.pos + bytes]
70 else:
69 else:
71 data = data[self.pos:]
70 data = data[self.pos:]
72 elif bytes:
71 elif bytes:
73 data = data[:bytes]
72 data = data[:bytes]
74 self.pos += len(data)
73 self.pos += len(data)
75 return data
74 return data
76 def readlines(self):
75 def readlines(self):
77 return self.read().splitlines(True)
76 return self.read().splitlines(True)
78 def __iter__(self):
77 def __iter__(self):
79 return iter(self.readlines())
78 return iter(self.readlines())
80 def close(self):
79 def close(self):
81 pass
80 pass
82
81
83 # _RangeError and _HTTPRangeHandler were originally in byterange.py,
82 # _RangeError and _HTTPRangeHandler were originally in byterange.py,
84 # which was itself extracted from urlgrabber. See the last version of
83 # which was itself extracted from urlgrabber. See the last version of
85 # byterange.py from history if you need more information.
84 # byterange.py from history if you need more information.
86 class _RangeError(IOError):
85 class _RangeError(IOError):
87 """Error raised when an unsatisfiable range is requested."""
86 """Error raised when an unsatisfiable range is requested."""
88
87
89 class _HTTPRangeHandler(urlreq.basehandler):
88 class _HTTPRangeHandler(urlreq.basehandler):
90 """Handler that enables HTTP Range headers.
89 """Handler that enables HTTP Range headers.
91
90
92 This is extremely simple. The Range header is an HTTP feature to
91 This is extremely simple. The Range header is an HTTP feature to
93 begin with so all this class does is tell urllib2 that the
92 begin with so all this class does is tell urllib2 that the
94 "206 Partial Content" response from the HTTP server is what we
93 "206 Partial Content" response from the HTTP server is what we
95 expected.
94 expected.
96 """
95 """
97
96
98 def http_error_206(self, req, fp, code, msg, hdrs):
97 def http_error_206(self, req, fp, code, msg, hdrs):
99 # 206 Partial Content Response
98 # 206 Partial Content Response
100 r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
99 r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
101 r.code = code
100 r.code = code
102 r.msg = msg
101 r.msg = msg
103 return r
102 return r
104
103
105 def http_error_416(self, req, fp, code, msg, hdrs):
104 def http_error_416(self, req, fp, code, msg, hdrs):
106 # HTTP's Range Not Satisfiable error
105 # HTTP's Range Not Satisfiable error
107 raise _RangeError('Requested Range Not Satisfiable')
106 raise _RangeError('Requested Range Not Satisfiable')
108
107
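# Behavior sketch: each httprangereader.read() above issues one HTTP
# request. Reading 10 bytes at offset 100 sends 'Range: bytes=100-109';
# a server honoring it answers 206 and _HTTPRangeHandler passes the
# slice through, while a server ignoring it answers 200 and read()
# slices the full body locally.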
109 def build_opener(ui, authinfo):
108 def build_opener(ui, authinfo):
110 # urllib cannot handle URLs with embedded user or passwd
109 # urllib cannot handle URLs with embedded user or passwd
111 urlopener = url.opener(ui, authinfo)
110 urlopener = url.opener(ui, authinfo)
112 urlopener.add_handler(_HTTPRangeHandler())
111 urlopener.add_handler(_HTTPRangeHandler())
113
112
114 class statichttpvfs(vfsmod.abstractvfs):
113 class statichttpvfs(vfsmod.abstractvfs):
115 def __init__(self, base):
114 def __init__(self, base):
116 self.base = base
115 self.base = base
117
116
118 def __call__(self, path, mode='r', *args, **kw):
117 def __call__(self, path, mode='r', *args, **kw):
119 if mode not in ('r', 'rb'):
118 if mode not in ('r', 'rb'):
120 raise IOError('Permission denied')
119 raise IOError('Permission denied')
121 f = "/".join((self.base, urlreq.quote(path)))
120 f = "/".join((self.base, urlreq.quote(path)))
122 return httprangereader(f, urlopener)
121 return httprangereader(f, urlopener)
123
122
124 def join(self, path):
123 def join(self, path):
125 if path:
124 if path:
126 return pathutil.join(self.base, path)
125 return pathutil.join(self.base, path)
127 else:
126 else:
128 return self.base
127 return self.base
129
128
130 return statichttpvfs
129 return statichttpvfs
131
130
132 class statichttppeer(localrepo.localpeer):
131 class statichttppeer(localrepo.localpeer):
133 def local(self):
132 def local(self):
134 return None
133 return None
135 def canpush(self):
134 def canpush(self):
136 return False
135 return False
137
136
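# Usage sketch: repositories exported over plain HTTP are addressed with
# the 'static-http://' scheme and are strictly read-only -- pulls and
# clones work, while pushes and locks are refused.
#
#   hg clone static-http://example.com/repos/project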
138 class statichttprepository(localrepo.localrepository):
137 class statichttprepository(localrepo.localrepository):
139 supported = localrepo.localrepository._basesupported
138 supported = localrepo.localrepository._basesupported
140
139
141 def __init__(self, ui, path):
140 def __init__(self, ui, path):
142 self._url = path
141 self._url = path
143 self.ui = ui
142 self.ui = ui
144
143
145 self.root = path
144 self.root = path
146 u = util.url(path.rstrip('/') + "/.hg")
145 u = util.url(path.rstrip('/') + "/.hg")
147 self.path, authinfo = u.authinfo()
146 self.path, authinfo = u.authinfo()
148
147
149 vfsclass = build_opener(ui, authinfo)
148 vfsclass = build_opener(ui, authinfo)
150 self.vfs = vfsclass(self.path)
149 self.vfs = vfsclass(self.path)
151 self.cachevfs = vfsclass(self.vfs.join('cache'))
150 self.cachevfs = vfsclass(self.vfs.join('cache'))
152 self._phasedefaults = []
151 self._phasedefaults = []
153
152
154 self.names = namespaces.namespaces()
153 self.names = namespaces.namespaces()
155 self.filtername = None
154 self.filtername = None
156
155
157 try:
156 try:
158 requirements = set(self.vfs.read(b'requires').splitlines())
157 requirements = set(self.vfs.read(b'requires').splitlines())
159 except IOError as inst:
158 except IOError as inst:
160 if inst.errno != errno.ENOENT:
159 if inst.errno != errno.ENOENT:
161 raise
160 raise
162 requirements = set()
161 requirements = set()
163
162
164 # check if it is a non-empty old-style repository
163 # check if it is a non-empty old-style repository
165 try:
164 try:
166 fp = self.vfs("00changelog.i")
165 fp = self.vfs("00changelog.i")
167 fp.read(1)
166 fp.read(1)
168 fp.close()
167 fp.close()
169 except IOError as inst:
168 except IOError as inst:
170 if inst.errno != errno.ENOENT:
169 if inst.errno != errno.ENOENT:
171 raise
170 raise
172 # we do not care about empty old-style repositories here
171 # we do not care about empty old-style repositories here
173 msg = _("'%s' does not appear to be an hg repository") % path
172 msg = _("'%s' does not appear to be an hg repository") % path
174 raise error.RepoError(msg)
173 raise error.RepoError(msg)
175
174
176 supportedrequirements = localrepo.gathersupportedrequirements(ui)
175 supportedrequirements = localrepo.gathersupportedrequirements(ui)
177 localrepo.ensurerequirementsrecognized(requirements,
176 localrepo.ensurerequirementsrecognized(requirements,
178 supportedrequirements)
177 supportedrequirements)
179 localrepo.ensurerequirementscompatible(ui, requirements)
178 localrepo.ensurerequirementscompatible(ui, requirements)
180
179
181 # setup store
180 # setup store
182 self.store = store.store(requirements, self.path, vfsclass)
181 self.store = localrepo.makestore(requirements, self.path, vfsclass)
183 self.spath = self.store.path
182 self.spath = self.store.path
184 self.svfs = self.store.opener
183 self.svfs = self.store.opener
185 self.sjoin = self.store.join
184 self.sjoin = self.store.join
186 self._filecache = {}
185 self._filecache = {}
187 self.requirements = requirements
186 self.requirements = requirements
188
187
189 self.manifestlog = manifest.manifestlog(self.svfs, self)
188 self.manifestlog = manifest.manifestlog(self.svfs, self)
190 self.changelog = changelog.changelog(self.svfs)
189 self.changelog = changelog.changelog(self.svfs)
191 self._tags = None
190 self._tags = None
192 self.nodetagscache = None
191 self.nodetagscache = None
193 self._branchcaches = {}
192 self._branchcaches = {}
194 self._revbranchcache = None
193 self._revbranchcache = None
195 self.encodepats = None
194 self.encodepats = None
196 self.decodepats = None
195 self.decodepats = None
197 self._transref = None
196 self._transref = None
198
197
199 def _restrictcapabilities(self, caps):
198 def _restrictcapabilities(self, caps):
200 caps = super(statichttprepository, self)._restrictcapabilities(caps)
199 caps = super(statichttprepository, self)._restrictcapabilities(caps)
201 return caps.difference(["pushkey"])
200 return caps.difference(["pushkey"])
202
201
203 def url(self):
202 def url(self):
204 return self._url
203 return self._url
205
204
206 def local(self):
205 def local(self):
207 return False
206 return False
208
207
209 def peer(self):
208 def peer(self):
210 return statichttppeer(self)
209 return statichttppeer(self)
211
210
212 def wlock(self, wait=True):
211 def wlock(self, wait=True):
213 raise error.LockUnavailable(0, _('lock not available'), 'lock',
212 raise error.LockUnavailable(0, _('lock not available'), 'lock',
214 _('cannot lock static-http repository'))
213 _('cannot lock static-http repository'))
215
214
216 def lock(self, wait=True):
215 def lock(self, wait=True):
217 raise error.Abort(_('cannot lock static-http repository'))
216 raise error.Abort(_('cannot lock static-http repository'))
218
217
219 def _writecaches(self):
218 def _writecaches(self):
220 pass # statichttprepository is read-only
219 pass # statichttprepository is read-only
221
220
222 def instance(ui, path, create, intents=None, createopts=None):
221 def instance(ui, path, create, intents=None, createopts=None):
223 if create:
222 if create:
224 raise error.Abort(_('cannot create new static-http repository'))
223 raise error.Abort(_('cannot create new static-http repository'))
225 return statichttprepository(ui, path[7:])
224 return statichttprepository(ui, path[7:])
@@ -1,594 +1,587 b''
1 # store.py - repository store handling for Mercurial
1 # store.py - repository store handling for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from . import (
16 from . import (
17 error,
17 error,
18 node,
18 node,
19 policy,
19 policy,
20 pycompat,
20 pycompat,
21 util,
21 util,
22 vfs as vfsmod,
22 vfs as vfsmod,
23 )
23 )
24
24
25 parsers = policy.importmod(r'parsers')
25 parsers = policy.importmod(r'parsers')
26
26
27 # This avoids a collision between a file named foo and a dir named
27 # This avoids a collision between a file named foo and a dir named
28 # foo.i or foo.d
28 # foo.i or foo.d
29 def _encodedir(path):
29 def _encodedir(path):
30 '''
30 '''
31 >>> _encodedir(b'data/foo.i')
31 >>> _encodedir(b'data/foo.i')
32 'data/foo.i'
32 'data/foo.i'
33 >>> _encodedir(b'data/foo.i/bla.i')
33 >>> _encodedir(b'data/foo.i/bla.i')
34 'data/foo.i.hg/bla.i'
34 'data/foo.i.hg/bla.i'
35 >>> _encodedir(b'data/foo.i.hg/bla.i')
35 >>> _encodedir(b'data/foo.i.hg/bla.i')
36 'data/foo.i.hg.hg/bla.i'
36 'data/foo.i.hg.hg/bla.i'
37 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
37 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
38 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
38 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
39 '''
39 '''
40 return (path
40 return (path
41 .replace(".hg/", ".hg.hg/")
41 .replace(".hg/", ".hg.hg/")
42 .replace(".i/", ".i.hg/")
42 .replace(".i/", ".i.hg/")
43 .replace(".d/", ".d.hg/"))
43 .replace(".d/", ".d.hg/"))
44
44
45 encodedir = getattr(parsers, 'encodedir', _encodedir)
45 encodedir = getattr(parsers, 'encodedir', _encodedir)
46
46
47 def decodedir(path):
47 def decodedir(path):
48 '''
48 '''
49 >>> decodedir(b'data/foo.i')
49 >>> decodedir(b'data/foo.i')
50 'data/foo.i'
50 'data/foo.i'
51 >>> decodedir(b'data/foo.i.hg/bla.i')
51 >>> decodedir(b'data/foo.i.hg/bla.i')
52 'data/foo.i/bla.i'
52 'data/foo.i/bla.i'
53 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
53 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
54 'data/foo.i.hg/bla.i'
54 'data/foo.i.hg/bla.i'
55 '''
55 '''
56 if ".hg/" not in path:
56 if ".hg/" not in path:
57 return path
57 return path
58 return (path
58 return (path
59 .replace(".d.hg/", ".d/")
59 .replace(".d.hg/", ".d/")
60 .replace(".i.hg/", ".i/")
60 .replace(".i.hg/", ".i/")
61 .replace(".hg.hg/", ".hg/"))
61 .replace(".hg.hg/", ".hg/"))
62
62
63 def _reserved():
63 def _reserved():
64 ''' characters that are problematic for filesystems
64 ''' characters that are problematic for filesystems
65
65
66 * ascii escapes (0..31)
66 * ascii escapes (0..31)
67 * ascii hi (126..255)
67 * ascii hi (126..255)
68 * windows specials
68 * windows specials
69
69
70 these characters will be escaped by the encode functions
70 these characters will be escaped by the encode functions
71 '''
71 '''
72 winreserved = [ord(x) for x in u'\\:*?"<>|']
72 winreserved = [ord(x) for x in u'\\:*?"<>|']
73 for x in range(32):
73 for x in range(32):
74 yield x
74 yield x
75 for x in range(126, 256):
75 for x in range(126, 256):
76 yield x
76 yield x
77 for x in winreserved:
77 for x in winreserved:
78 yield x
78 yield x
79
79
80 def _buildencodefun():
80 def _buildencodefun():
81 '''
81 '''
82 >>> enc, dec = _buildencodefun()
82 >>> enc, dec = _buildencodefun()
83
83
84 >>> enc(b'nothing/special.txt')
84 >>> enc(b'nothing/special.txt')
85 'nothing/special.txt'
85 'nothing/special.txt'
86 >>> dec(b'nothing/special.txt')
86 >>> dec(b'nothing/special.txt')
87 'nothing/special.txt'
87 'nothing/special.txt'
88
88
89 >>> enc(b'HELLO')
89 >>> enc(b'HELLO')
90 '_h_e_l_l_o'
90 '_h_e_l_l_o'
91 >>> dec(b'_h_e_l_l_o')
91 >>> dec(b'_h_e_l_l_o')
92 'HELLO'
92 'HELLO'
93
93
94 >>> enc(b'hello:world?')
94 >>> enc(b'hello:world?')
95 'hello~3aworld~3f'
95 'hello~3aworld~3f'
96 >>> dec(b'hello~3aworld~3f')
96 >>> dec(b'hello~3aworld~3f')
97 'hello:world?'
97 'hello:world?'
98
98
99 >>> enc(b'the\\x07quick\\xADshot')
99 >>> enc(b'the\\x07quick\\xADshot')
100 'the~07quick~adshot'
100 'the~07quick~adshot'
101 >>> dec(b'the~07quick~adshot')
101 >>> dec(b'the~07quick~adshot')
102 'the\\x07quick\\xadshot'
102 'the\\x07quick\\xadshot'
103 '''
103 '''
104 e = '_'
104 e = '_'
105 xchr = pycompat.bytechr
105 xchr = pycompat.bytechr
106 asciistr = list(map(xchr, range(127)))
106 asciistr = list(map(xchr, range(127)))
107 capitals = list(range(ord("A"), ord("Z") + 1))
107 capitals = list(range(ord("A"), ord("Z") + 1))
108
108
109 cmap = dict((x, x) for x in asciistr)
109 cmap = dict((x, x) for x in asciistr)
110 for x in _reserved():
110 for x in _reserved():
111 cmap[xchr(x)] = "~%02x" % x
111 cmap[xchr(x)] = "~%02x" % x
112 for x in capitals + [ord(e)]:
112 for x in capitals + [ord(e)]:
113 cmap[xchr(x)] = e + xchr(x).lower()
113 cmap[xchr(x)] = e + xchr(x).lower()
114
114
115 dmap = {}
115 dmap = {}
116 for k, v in cmap.iteritems():
116 for k, v in cmap.iteritems():
117 dmap[v] = k
117 dmap[v] = k
118 def decode(s):
118 def decode(s):
119 i = 0
119 i = 0
120 while i < len(s):
120 while i < len(s):
121 for l in pycompat.xrange(1, 4):
121 for l in pycompat.xrange(1, 4):
122 try:
122 try:
123 yield dmap[s[i:i + l]]
123 yield dmap[s[i:i + l]]
124 i += l
124 i += l
125 break
125 break
126 except KeyError:
126 except KeyError:
127 pass
127 pass
128 else:
128 else:
129 raise KeyError
129 raise KeyError
130 return (lambda s: ''.join([cmap[s[c:c + 1]]
130 return (lambda s: ''.join([cmap[s[c:c + 1]]
131 for c in pycompat.xrange(len(s))]),
131 for c in pycompat.xrange(len(s))]),
132 lambda s: ''.join(list(decode(s))))
132 lambda s: ''.join(list(decode(s))))
133
133
134 _encodefname, _decodefname = _buildencodefun()
134 _encodefname, _decodefname = _buildencodefun()
135
135
136 def encodefilename(s):
136 def encodefilename(s):
137 '''
137 '''
138 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
138 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
139 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
139 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
140 '''
140 '''
141 return _encodefname(encodedir(s))
141 return _encodefname(encodedir(s))
142
142
143 def decodefilename(s):
143 def decodefilename(s):
144 '''
144 '''
145 >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
145 >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
146 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
146 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
147 '''
147 '''
148 return decodedir(_decodefname(s))
148 return decodedir(_decodefname(s))
149
149
150 def _buildlowerencodefun():
150 def _buildlowerencodefun():
151 '''
151 '''
152 >>> f = _buildlowerencodefun()
152 >>> f = _buildlowerencodefun()
153 >>> f(b'nothing/special.txt')
153 >>> f(b'nothing/special.txt')
154 'nothing/special.txt'
154 'nothing/special.txt'
155 >>> f(b'HELLO')
155 >>> f(b'HELLO')
156 'hello'
156 'hello'
157 >>> f(b'hello:world?')
157 >>> f(b'hello:world?')
158 'hello~3aworld~3f'
158 'hello~3aworld~3f'
159 >>> f(b'the\\x07quick\\xADshot')
159 >>> f(b'the\\x07quick\\xADshot')
160 'the~07quick~adshot'
160 'the~07quick~adshot'
161 '''
161 '''
162 xchr = pycompat.bytechr
162 xchr = pycompat.bytechr
163 cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
163 cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
164 for x in _reserved():
164 for x in _reserved():
165 cmap[xchr(x)] = "~%02x" % x
165 cmap[xchr(x)] = "~%02x" % x
166 for x in range(ord("A"), ord("Z") + 1):
166 for x in range(ord("A"), ord("Z") + 1):
167 cmap[xchr(x)] = xchr(x).lower()
167 cmap[xchr(x)] = xchr(x).lower()
168 def lowerencode(s):
168 def lowerencode(s):
169 return "".join([cmap[c] for c in pycompat.iterbytestr(s)])
169 return "".join([cmap[c] for c in pycompat.iterbytestr(s)])
170 return lowerencode
170 return lowerencode
171
171
172 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
172 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
173
173
174 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
174 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
175 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
175 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
176 _winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
176 _winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
177 def _auxencode(path, dotencode):
177 def _auxencode(path, dotencode):
178 '''
178 '''
179 Encodes filenames containing names reserved by Windows or which end in
179 Encodes filenames containing names reserved by Windows or which end in
180 period or space. Does not touch other single reserved characters c.
180 period or space. Does not touch other single reserved characters c.
181 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
181 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
182 Additionally encodes space or period at the beginning, if dotencode is
182 Additionally encodes space or period at the beginning, if dotencode is
183 True. Parameter path is assumed to be all lowercase.
183 True. Parameter path is assumed to be all lowercase.
184 A segment only needs encoding if a reserved name appears as a
184 A segment only needs encoding if a reserved name appears as a
185 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
185 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
186 doesn't need encoding.
186 doesn't need encoding.
187
187
188 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
188 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
189 >>> _auxencode(s.split(b'/'), True)
189 >>> _auxencode(s.split(b'/'), True)
190 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
190 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
191 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
191 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
192 >>> _auxencode(s.split(b'/'), False)
192 >>> _auxencode(s.split(b'/'), False)
193 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
193 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
194 >>> _auxencode([b'foo. '], True)
194 >>> _auxencode([b'foo. '], True)
195 ['foo.~20']
195 ['foo.~20']
196 >>> _auxencode([b' .foo'], True)
196 >>> _auxencode([b' .foo'], True)
197 ['~20.foo']
197 ['~20.foo']
198 '''
198 '''
199 for i, n in enumerate(path):
199 for i, n in enumerate(path):
200 if not n:
200 if not n:
201 continue
201 continue
202 if dotencode and n[0] in '. ':
202 if dotencode and n[0] in '. ':
203 n = "~%02x" % ord(n[0:1]) + n[1:]
203 n = "~%02x" % ord(n[0:1]) + n[1:]
204 path[i] = n
204 path[i] = n
205 else:
205 else:
206 l = n.find('.')
206 l = n.find('.')
207 if l == -1:
207 if l == -1:
208 l = len(n)
208 l = len(n)
209 if ((l == 3 and n[:3] in _winres3) or
209 if ((l == 3 and n[:3] in _winres3) or
210 (l == 4 and n[3:4] <= '9' and n[3:4] >= '1'
210 (l == 4 and n[3:4] <= '9' and n[3:4] >= '1'
211 and n[:3] in _winres4)):
211 and n[:3] in _winres4)):
212 # encode third letter ('aux' -> 'au~78')
212 # encode third letter ('aux' -> 'au~78')
213 ec = "~%02x" % ord(n[2:3])
213 ec = "~%02x" % ord(n[2:3])
214 n = n[0:2] + ec + n[3:]
214 n = n[0:2] + ec + n[3:]
215 path[i] = n
215 path[i] = n
216 if n[-1] in '. ':
216 if n[-1] in '. ':
217 # encode last period or space ('foo...' -> 'foo..~2e')
217 # encode last period or space ('foo...' -> 'foo..~2e')
218 path[i] = n[:-1] + "~%02x" % ord(n[-1:])
218 path[i] = n[:-1] + "~%02x" % ord(n[-1:])
219 return path
219 return path
220
220
221 _maxstorepathlen = 120
221 _maxstorepathlen = 120
222 _dirprefixlen = 8
222 _dirprefixlen = 8
223 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
223 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
224
224
225 def _hashencode(path, dotencode):
225 def _hashencode(path, dotencode):
226 digest = node.hex(hashlib.sha1(path).digest())
226 digest = node.hex(hashlib.sha1(path).digest())
227 le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/'
227 le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/'
228 parts = _auxencode(le, dotencode)
228 parts = _auxencode(le, dotencode)
229 basename = parts[-1]
229 basename = parts[-1]
230 _root, ext = os.path.splitext(basename)
230 _root, ext = os.path.splitext(basename)
231 sdirs = []
231 sdirs = []
232 sdirslen = 0
232 sdirslen = 0
233 for p in parts[:-1]:
233 for p in parts[:-1]:
234 d = p[:_dirprefixlen]
234 d = p[:_dirprefixlen]
235 if d[-1] in '. ':
235 if d[-1] in '. ':
236 # Windows can't access dirs ending in period or space
236 # Windows can't access dirs ending in period or space
237 d = d[:-1] + '_'
237 d = d[:-1] + '_'
238 if sdirslen == 0:
238 if sdirslen == 0:
239 t = len(d)
239 t = len(d)
240 else:
240 else:
241 t = sdirslen + 1 + len(d)
241 t = sdirslen + 1 + len(d)
242 if t > _maxshortdirslen:
242 if t > _maxshortdirslen:
243 break
243 break
244 sdirs.append(d)
244 sdirs.append(d)
245 sdirslen = t
245 sdirslen = t
246 dirs = '/'.join(sdirs)
246 dirs = '/'.join(sdirs)
247 if len(dirs) > 0:
247 if len(dirs) > 0:
248 dirs += '/'
248 dirs += '/'
249 res = 'dh/' + dirs + digest + ext
249 res = 'dh/' + dirs + digest + ext
250 spaceleft = _maxstorepathlen - len(res)
250 spaceleft = _maxstorepathlen - len(res)
251 if spaceleft > 0:
251 if spaceleft > 0:
252 filler = basename[:spaceleft]
252 filler = basename[:spaceleft]
253 res = 'dh/' + dirs + filler + digest + ext
253 res = 'dh/' + dirs + filler + digest + ext
254 return res
254 return res
255
255
256 def _hybridencode(path, dotencode):
256 def _hybridencode(path, dotencode):
257 '''encodes path with a length limit
257 '''encodes path with a length limit
258
258
259 Encodes all paths that begin with 'data/', according to the following.
259 Encodes all paths that begin with 'data/', according to the following.
260
260
261 Default encoding (reversible):
261 Default encoding (reversible):
262
262
263 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
263 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
264 characters are encoded as '~xx', where xx is the two digit hex code
264 characters are encoded as '~xx', where xx is the two digit hex code
265 of the character (see encodefilename).
265 of the character (see encodefilename).
266 Relevant path components consisting of Windows reserved filenames are
266 Relevant path components consisting of Windows reserved filenames are
267 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
267 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
268
268
269 Hashed encoding (not reversible):
269 Hashed encoding (not reversible):
270
270
271 If the default-encoded path is longer than _maxstorepathlen, a
271 If the default-encoded path is longer than _maxstorepathlen, a
272 non-reversible hybrid hashing of the path is done instead.
272 non-reversible hybrid hashing of the path is done instead.
273 This encoding uses up to _dirprefixlen characters of all directory
273 This encoding uses up to _dirprefixlen characters of all directory
274 levels of the lowerencoded path, but not more levels than can fit into
274 levels of the lowerencoded path, but not more levels than can fit into
275 _maxshortdirslen.
275 _maxshortdirslen.
276 Then follows the filler followed by the sha digest of the full path.
276 Then follows the filler followed by the sha digest of the full path.
277 The filler is the beginning of the basename of the lowerencoded path
277 The filler is the beginning of the basename of the lowerencoded path
278 (the basename is everything after the last path separator). The filler
278 (the basename is everything after the last path separator). The filler
279 is as long as possible, filling in characters from the basename until
279 is as long as possible, filling in characters from the basename until
280 the encoded path has _maxstorepathlen characters (or all chars of the
280 the encoded path has _maxstorepathlen characters (or all chars of the
281 basename have been taken).
281 basename have been taken).
282 The extension (e.g. '.i' or '.d') is preserved.
282 The extension (e.g. '.i' or '.d') is preserved.
283
283
284 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
284 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
285 encoding was used.
285 encoding was used.
286 '''
286 '''
287 path = encodedir(path)
287 path = encodedir(path)
288 ef = _encodefname(path).split('/')
288 ef = _encodefname(path).split('/')
289 res = '/'.join(_auxencode(ef, dotencode))
289 res = '/'.join(_auxencode(ef, dotencode))
290 if len(res) > _maxstorepathlen:
290 if len(res) > _maxstorepathlen:
291 res = _hashencode(path, dotencode)
291 res = _hashencode(path, dotencode)
292 return res
292 return res
293
293
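# Shape sketch of the hashed form (digest elided): once the default
# encoding exceeds _maxstorepathlen (120), the path collapses to
#
#   'dh/' + <dir prefixes, up to _dirprefixlen chars each, capped at
#            _maxshortdirslen> + <filler from the basename>
#         + <40-char sha1 hex of the full path> + <extension, e.g. '.i'>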
294 def _pathencode(path):
294 def _pathencode(path):
295 de = encodedir(path)
295 de = encodedir(path)
296 if len(path) > _maxstorepathlen:
296 if len(path) > _maxstorepathlen:
297 return _hashencode(de, True)
297 return _hashencode(de, True)
298 ef = _encodefname(de).split('/')
298 ef = _encodefname(de).split('/')
299 res = '/'.join(_auxencode(ef, True))
299 res = '/'.join(_auxencode(ef, True))
300 if len(res) > _maxstorepathlen:
300 if len(res) > _maxstorepathlen:
301 return _hashencode(de, True)
301 return _hashencode(de, True)
302 return res
302 return res
303
303
304 _pathencode = getattr(parsers, 'pathencode', _pathencode)
304 _pathencode = getattr(parsers, 'pathencode', _pathencode)
305
305
306 def _plainhybridencode(f):
306 def _plainhybridencode(f):
307 return _hybridencode(f, False)
307 return _hybridencode(f, False)
308
308
309 def _calcmode(vfs):
309 def _calcmode(vfs):
310 try:
310 try:
311 # files in .hg/ will be created using this mode
311 # files in .hg/ will be created using this mode
312 mode = vfs.stat().st_mode
312 mode = vfs.stat().st_mode
313 # avoid some useless chmods
313 # avoid some useless chmods
314 if (0o777 & ~util.umask) == (0o777 & mode):
314 if (0o777 & ~util.umask) == (0o777 & mode):
315 mode = None
315 mode = None
316 except OSError:
316 except OSError:
317 mode = None
317 mode = None
318 return mode
318 return mode
319
319
320 _data = ('narrowspec data meta 00manifest.d 00manifest.i'
320 _data = ('narrowspec data meta 00manifest.d 00manifest.i'
321 ' 00changelog.d 00changelog.i phaseroots obsstore')
321 ' 00changelog.d 00changelog.i phaseroots obsstore')
322
322
323 def isrevlog(f, kind, st):
323 def isrevlog(f, kind, st):
324 return kind == stat.S_IFREG and f[-2:] in ('.i', '.d')
324 return kind == stat.S_IFREG and f[-2:] in ('.i', '.d')
325
325
326 class basicstore(object):
326 class basicstore(object):
327 '''base class for local repository stores'''
327 '''base class for local repository stores'''
328 def __init__(self, path, vfstype):
328 def __init__(self, path, vfstype):
329 vfs = vfstype(path)
329 vfs = vfstype(path)
330 self.path = vfs.base
330 self.path = vfs.base
331 self.createmode = _calcmode(vfs)
331 self.createmode = _calcmode(vfs)
332 vfs.createmode = self.createmode
332 vfs.createmode = self.createmode
333 self.rawvfs = vfs
333 self.rawvfs = vfs
334 self.vfs = vfsmod.filtervfs(vfs, encodedir)
334 self.vfs = vfsmod.filtervfs(vfs, encodedir)
335 self.opener = self.vfs
335 self.opener = self.vfs
336
336
337 def join(self, f):
337 def join(self, f):
338 return self.path + '/' + encodedir(f)
338 return self.path + '/' + encodedir(f)
339
339
340 def _walk(self, relpath, recurse, filefilter=isrevlog):
340 def _walk(self, relpath, recurse, filefilter=isrevlog):
341 '''yields (unencoded, encoded, size)'''
341 '''yields (unencoded, encoded, size)'''
342 path = self.path
342 path = self.path
343 if relpath:
343 if relpath:
344 path += '/' + relpath
344 path += '/' + relpath
345 striplen = len(self.path) + 1
345 striplen = len(self.path) + 1
346 l = []
346 l = []
347 if self.rawvfs.isdir(path):
347 if self.rawvfs.isdir(path):
348 visit = [path]
348 visit = [path]
349 readdir = self.rawvfs.readdir
349 readdir = self.rawvfs.readdir
350 while visit:
350 while visit:
351 p = visit.pop()
351 p = visit.pop()
352 for f, kind, st in readdir(p, stat=True):
352 for f, kind, st in readdir(p, stat=True):
353 fp = p + '/' + f
353 fp = p + '/' + f
354 if filefilter(f, kind, st):
354 if filefilter(f, kind, st):
355 n = util.pconvert(fp[striplen:])
355 n = util.pconvert(fp[striplen:])
356 l.append((decodedir(n), n, st.st_size))
356 l.append((decodedir(n), n, st.st_size))
357 elif kind == stat.S_IFDIR and recurse:
357 elif kind == stat.S_IFDIR and recurse:
358 visit.append(fp)
358 visit.append(fp)
359 l.sort()
359 l.sort()
360 return l
360 return l
361
361
362 def datafiles(self):
362 def datafiles(self):
363 return self._walk('data', True) + self._walk('meta', True)
363 return self._walk('data', True) + self._walk('meta', True)
364
364
365 def topfiles(self):
365 def topfiles(self):
366 # yield manifest before changelog
366 # yield manifest before changelog
367 return reversed(self._walk('', False))
367 return reversed(self._walk('', False))
368
368
369 def walk(self):
369 def walk(self):
370 '''yields (unencoded, encoded, size)'''
370 '''yields (unencoded, encoded, size)'''
371 # yield data files first
371 # yield data files first
372 for x in self.datafiles():
372 for x in self.datafiles():
373 yield x
373 yield x
374 for x in self.topfiles():
374 for x in self.topfiles():
375 yield x
375 yield x
376
376
377 def copylist(self):
377 def copylist(self):
378 return ['requires'] + _data.split()
378 return ['requires'] + _data.split()
379
379
380 def write(self, tr):
380 def write(self, tr):
381 pass
381 pass
382
382
383 def invalidatecaches(self):
383 def invalidatecaches(self):
384 pass
384 pass
385
385
386 def markremoved(self, fn):
386 def markremoved(self, fn):
387 pass
387 pass
388
388
389 def __contains__(self, path):
389 def __contains__(self, path):
390 '''Checks if the store contains path'''
390 '''Checks if the store contains path'''
391 path = "/".join(("data", path))
391 path = "/".join(("data", path))
392 # file?
392 # file?
393 if self.vfs.exists(path + ".i"):
393 if self.vfs.exists(path + ".i"):
394 return True
394 return True
395 # dir?
395 # dir?
396 if not path.endswith("/"):
396 if not path.endswith("/"):
397 path = path + "/"
397 path = path + "/"
398 return self.vfs.exists(path)
398 return self.vfs.exists(path)
399
399
400 class encodedstore(basicstore):
400 class encodedstore(basicstore):
401 def __init__(self, path, vfstype):
401 def __init__(self, path, vfstype):
402 vfs = vfstype(path + '/store')
402 vfs = vfstype(path + '/store')
403 self.path = vfs.base
403 self.path = vfs.base
404 self.createmode = _calcmode(vfs)
404 self.createmode = _calcmode(vfs)
405 vfs.createmode = self.createmode
405 vfs.createmode = self.createmode
406 self.rawvfs = vfs
406 self.rawvfs = vfs
407 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
407 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
408 self.opener = self.vfs
408 self.opener = self.vfs
409
409
410 def datafiles(self):
410 def datafiles(self):
        for a, b, size in super(encodedstore, self).datafiles():
            try:
                a = decodefilename(a)
            except KeyError:
                a = None
            yield a, b, size

    def join(self, f):
        return self.path + '/' + encodefilename(f)

    def copylist(self):
        return (['requires', '00changelog.i'] +
                ['store/' + f for f in _data.split()])

class fncache(object):
    # the filename used to be partially encoded
    # hence the encodedir/decodedir dance
    def __init__(self, vfs):
        self.vfs = vfs
        self.entries = None
        self._dirty = False

    def _load(self):
        '''fill the entries from the fncache file'''
        self._dirty = False
        try:
            fp = self.vfs('fncache', mode='rb')
        except IOError:
            # skip nonexistent file
            self.entries = set()
            return
        self.entries = set(decodedir(fp.read()).splitlines())
        if '' in self.entries:
            fp.seek(0)
            for n, line in enumerate(util.iterfile(fp)):
                if not line.rstrip('\n'):
                    t = _('invalid entry in fncache, line %d') % (n + 1)
                    raise error.Abort(t)
        fp.close()

    def write(self, tr):
        if self._dirty:
            assert self.entries is not None
            tr.addbackup('fncache')
            fp = self.vfs('fncache', mode='wb', atomictemp=True)
            if self.entries:
                fp.write(encodedir('\n'.join(self.entries) + '\n'))
            fp.close()
            self._dirty = False

    def add(self, fn):
        if self.entries is None:
            self._load()
        if fn not in self.entries:
            self._dirty = True
            self.entries.add(fn)

    def remove(self, fn):
        if self.entries is None:
            self._load()
        try:
            self.entries.remove(fn)
            self._dirty = True
        except KeyError:
            pass

    def __contains__(self, fn):
        if self.entries is None:
            self._load()
        return fn in self.entries

    def __iter__(self):
        if self.entries is None:
            self._load()
        return iter(self.entries)
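
# The class above is built around one invariant: the entry set is loaded
# from disk at most once, and written back only when something actually
# changed. A minimal sketch of that lazy-load/dirty-flag contract follows
# (toy code for illustration; `lazyset` and `loadfn` are names invented
# here, not Mercurial APIs).

class lazyset(object):
    def __init__(self, loadfn):
        self._loadfn = loadfn   # invoked at most once, on first access
        self.entries = None
        self._dirty = False

    def add(self, fn):
        if self.entries is None:
            self.entries = set(self._loadfn())
        if fn not in self.entries:
            self.entries.add(fn)
            self._dirty = True

loads = []
ls = lazyset(lambda: loads.append(1) or ['data/a.i'])
ls.add('data/b.i')          # first access triggers the single load
ls.add('data/a.i')          # already present: nothing to dirty
assert len(loads) == 1 and ls._dirty
assert ls.entries == {'data/a.i', 'data/b.i'}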

class _fncachevfs(vfsmod.abstractvfs, vfsmod.proxyvfs):
    def __init__(self, vfs, fnc, encode):
        vfsmod.proxyvfs.__init__(self, vfs)
        self.fncache = fnc
        self.encode = encode

    def __call__(self, path, mode='r', *args, **kw):
        encoded = self.encode(path)
        if mode not in ('r', 'rb') and (path.startswith('data/') or
                                        path.startswith('meta/')):
            # do not trigger a fncache load when adding a file that already is
            # known to exist.
            notload = self.fncache.entries is None and self.vfs.exists(encoded)
            if notload and 'a' in mode and not self.vfs.stat(encoded).st_size:
                # when appending to an existing file, if the file has size zero,
                # it should be considered as missing. Such zero-size files are
                # the result of truncation when a transaction is aborted.
                notload = False
            if not notload:
                self.fncache.add(path)
        return self.vfs(encoded, mode, *args, **kw)

    def join(self, path):
        if path:
            return self.vfs.join(self.encode(path))
        else:
            return self.vfs.join(path)
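
# For clarity, the registration decision in __call__ above restated as a
# standalone predicate (a sketch; the helper name and parameters are
# invented for illustration and are not Mercurial API).

def _shouldregister(path, mode, entriesloaded, existsondisk, sizeondisk):
    if mode in ('r', 'rb'):
        return False            # reads never have to touch the cache
    if not (path.startswith('data/') or path.startswith('meta/')):
        return False            # only revlog paths are tracked
    notload = not entriesloaded and existsondisk
    if notload and 'a' in mode and sizeondisk == 0:
        # zero-size leftover of an aborted transaction: treat as missing
        notload = False
    return not notload

assert _shouldregister('data/foo.i', 'a+b', False, False, 0)     # new file
assert not _shouldregister('data/foo.i', 'a+b', False, True, 42) # known file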

class fncachestore(basicstore):
    def __init__(self, path, vfstype, dotencode):
        if dotencode:
            encode = _pathencode
        else:
            encode = _plainhybridencode
        self.encode = encode
        vfs = vfstype(path + '/store')
        self.path = vfs.base
        self.pathsep = self.path + '/'
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        fnc = fncache(vfs)
        self.fncache = fnc
        self.vfs = _fncachevfs(vfs, fnc, encode)
        self.opener = self.vfs

    def join(self, f):
        return self.pathsep + self.encode(f)

    def getsize(self, path):
        return self.rawvfs.stat(path).st_size

    def datafiles(self):
        for f in sorted(self.fncache):
            ef = self.encode(f)
            try:
                yield f, ef, self.getsize(ef)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise

    def copylist(self):
        d = ('narrowspec data meta dh fncache phaseroots obsstore'
             ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
        return (['requires', '00changelog.i'] +
                ['store/' + f for f in d.split()])

    def write(self, tr):
        self.fncache.write(tr)

    def invalidatecaches(self):
        self.fncache.entries = None

    def markremoved(self, fn):
        self.fncache.remove(fn)

    def _exists(self, f):
        ef = self.encode(f)
        try:
            self.getsize(ef)
            return True
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # nonexistent entry
            return False

    def __contains__(self, path):
        '''Checks if the store contains path'''
        path = "/".join(("data", path))
        # check for files (exact match)
        e = path + '.i'
        if e in self.fncache and self._exists(e):
            return True
        # now check for directories (prefix match)
        if not path.endswith('/'):
            path += '/'
        for e in self.fncache:
            if e.startswith(path) and self._exists(e):
                return True
        return False
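
# The membership test above answers both file and directory queries against
# the cache; roughly (a restatement for illustration, with an invented
# helper name, not Mercurial API):

def _storecontains(entries, existsfn, path):
    path = 'data/' + path
    if path + '.i' in entries and existsfn(path + '.i'):
        return True                                  # exact file match
    prefix = path if path.endswith('/') else path + '/'
    return any(e.startswith(prefix) and existsfn(e) for e in entries)

_entries = {'data/a.i', 'data/dir/b.i'}
assert _storecontains(_entries, _entries.__contains__, 'a')    # a file
assert _storecontains(_entries, _entries.__contains__, 'dir')  # a directory
assert not _storecontains(_entries, _entries.__contains__, 'zz')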

def store(requirements, path, vfstype):
    if 'store' in requirements:
        if 'fncache' in requirements:
            return fncachestore(path, vfstype, 'dotencode' in requirements)
        return encodedstore(path, vfstype)
    return basicstore(path, vfstype)
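
# The dispatch above, restated with its three possible outcomes (a sketch
# for illustration; `pickstore` is an invented name, not Mercurial API):

def pickstore(requirements):
    if 'store' not in requirements:
        return 'basicstore'      # legacy layout: revlogs directly in .hg/
    if 'fncache' not in requirements:
        return 'encodedstore'    # store layout with filename encoding only
    return 'fncachestore'        # store + fncache (+ optional dotencode)

assert pickstore({'store', 'fncache', 'dotencode'}) == 'fncachestore'
assert pickstore({'store'}) == 'encodedstore'
assert pickstore(set()) == 'basicstore'
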
@@ -1,510 +1,510 @@
#require repofncache

Init repo1:

  $ hg init repo1
  $ cd repo1
  $ echo "some text" > a
  $ hg add
  adding a
  $ hg ci -m first
  $ cat .hg/store/fncache | sort
  data/a.i

Testing a.i/b:

  $ mkdir a.i
  $ echo "some other text" > a.i/b
  $ hg add
  adding a.i/b
  $ hg ci -m second
  $ cat .hg/store/fncache | sort
  data/a.i
  data/a.i.hg/b.i

Testing a.i.hg/c:

  $ mkdir a.i.hg
  $ echo "yet another text" > a.i.hg/c
  $ hg add
  adding a.i.hg/c
  $ hg ci -m third
  $ cat .hg/store/fncache | sort
  data/a.i
  data/a.i.hg.hg/c.i
  data/a.i.hg/b.i

Testing verify:

  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 3 changesets with 3 changes to 3 files

  $ rm .hg/store/fncache

  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  warning: revlog 'data/a.i' not in fncache!
  warning: revlog 'data/a.i.hg/c.i' not in fncache!
  warning: revlog 'data/a.i/b.i' not in fncache!
  checked 3 changesets with 3 changes to 3 files
  3 warnings encountered!
  hint: run "hg debugrebuildfncache" to recover from corrupt fncache

Follow the hint to make sure it works

  $ hg debugrebuildfncache
  adding data/a.i
  adding data/a.i.hg/c.i
  adding data/a.i/b.i
  3 items added, 0 removed from fncache

  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 3 changesets with 3 changes to 3 files

  $ cd ..

Non store repo:

  $ hg --config format.usestore=False init foo
  $ cd foo
  $ mkdir tst.d
  $ echo foo > tst.d/foo
  $ hg ci -Amfoo
  adding tst.d/foo
  $ find .hg | sort
  .hg
  .hg/00changelog.i
  .hg/00manifest.i
  .hg/cache
  .hg/cache/branch2-served
  .hg/cache/manifestfulltextcache (reporevlogstore !)
  .hg/cache/rbc-names-v1
  .hg/cache/rbc-revs-v1
  .hg/data
  .hg/data/tst.d.hg
  .hg/data/tst.d.hg/foo.i
  .hg/dirstate
  .hg/fsmonitor.state (fsmonitor !)
  .hg/last-message.txt
  .hg/phaseroots
  .hg/requires
  .hg/undo
  .hg/undo.backup.dirstate
  .hg/undo.backupfiles
  .hg/undo.bookmarks
  .hg/undo.branch
  .hg/undo.desc
  .hg/undo.dirstate
  .hg/undo.phaseroots
  $ cd ..

Non fncache repo:

  $ hg --config format.usefncache=False init bar
  $ cd bar
  $ mkdir tst.d
  $ echo foo > tst.d/Foo
  $ hg ci -Amfoo
  adding tst.d/Foo
  $ find .hg | sort
  .hg
  .hg/00changelog.i
  .hg/cache
  .hg/cache/branch2-served
  .hg/cache/manifestfulltextcache (reporevlogstore !)
  .hg/cache/rbc-names-v1
  .hg/cache/rbc-revs-v1
  .hg/dirstate
  .hg/fsmonitor.state (fsmonitor !)
  .hg/last-message.txt
  .hg/requires
  .hg/store
  .hg/store/00changelog.i
  .hg/store/00manifest.i
  .hg/store/data
  .hg/store/data/tst.d.hg
  .hg/store/data/tst.d.hg/_foo.i
  .hg/store/phaseroots
  .hg/store/undo
  .hg/store/undo.backupfiles
  .hg/store/undo.phaseroots
  .hg/undo.backup.dirstate
  .hg/undo.bookmarks
  .hg/undo.branch
  .hg/undo.desc
  .hg/undo.dirstate
  $ cd ..

Encoding of reserved / long paths in the store

  $ hg init r2
  $ cd r2
  $ cat <<EOF > .hg/hgrc
  > [ui]
  > portablefilenames = ignore
  > EOF

  $ hg import -q --bypass - <<EOF
  > # HG changeset patch
  > # User test
  > # Date 0 0
  > # Node ID 1c7a2f7cb77be1a0def34e4c7cabc562ad98fbd7
  > # Parent 0000000000000000000000000000000000000000
  > 1
  >
  > diff --git a/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
  > new file mode 100644
  > --- /dev/null
  > +++ b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
  > @@ -0,0 +1,1 @@
  > +foo
  > diff --git a/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
  > new file mode 100644
  > --- /dev/null
  > +++ b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
  > @@ -0,0 +1,1 @@
  > +foo
  > diff --git a/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
  > new file mode 100644
  > --- /dev/null
  > +++ b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
  > @@ -0,0 +1,1 @@
  > +foo
  > diff --git a/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
  > new file mode 100644
  > --- /dev/null
  > +++ b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
  > @@ -0,0 +1,1 @@
  > +foo
  > diff --git a/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
  > new file mode 100644
  > --- /dev/null
  > +++ b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
  > @@ -0,0 +1,1 @@
  > +foo
  > EOF

  $ find .hg/store -name *.i | sort
  .hg/store/00changelog.i
  .hg/store/00manifest.i
  .hg/store/data/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i
  .hg/store/dh/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxx168e07b38e65eff86ab579afaaa8e30bfbe0f35f.i
  .hg/store/dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i
  .hg/store/dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i
  .hg/store/dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilename0d8e1f4187c650e2f1fdca9fd90f786bc0976b6b.i

  $ cd ..

Aborting lock does not prevent fncache writes

  $ cat > exceptionext.py <<EOF
  > from __future__ import absolute_import
  > import os
  > from mercurial import commands, error, extensions
  >
  > def lockexception(orig, vfs, lockname, wait, releasefn, *args, **kwargs):
  >     def releasewrap():
  >         l.held = False # ensure __del__ is a noop
  >         raise error.Abort("forced lock failure")
  >     l = orig(vfs, lockname, wait, releasewrap, *args, **kwargs)
  >     return l
  >
  > def reposetup(ui, repo):
  >     extensions.wrapfunction(repo, '_lock', lockexception)
  >
  > cmdtable = {}
  >
  > # wrap "commit" command to prevent wlock from being '__del__()'-ed
  > # at the end of dispatching (for the intentional "forced lock failure")
  > def commitwrap(orig, ui, repo, *pats, **opts):
  >     repo = repo.unfiltered() # ensure the replaced repo._lock is used
  >     wlock = repo.wlock()
  >     try:
  >         return orig(ui, repo, *pats, **opts)
  >     finally:
  >         # multiple 'release()' calls are needed to fully release wlock,
  >         # because the "forced" abort when the store lock is released last
  >         # prevents wlock from being released by the same 'lockmod.release()'
  >         for i in range(wlock.held):
  >             wlock.release()
  >
  > def extsetup(ui):
  >     extensions.wrapcommand(commands.table, b"commit", commitwrap)
  > EOF
  $ extpath=`pwd`/exceptionext.py
  $ hg init fncachetxn
  $ cd fncachetxn
  $ printf "[extensions]\nexceptionext=$extpath\n" >> .hg/hgrc
  $ touch y
  $ hg ci -qAm y
  abort: forced lock failure
  [255]
  $ cat .hg/store/fncache
  data/y.i

Aborting transaction prevents fncache change

  $ cat > ../exceptionext.py <<EOF
  > from __future__ import absolute_import
  > import os
  > from mercurial import commands, error, extensions, localrepo
  >
  > def wrapper(orig, self, *args, **kwargs):
  >     tr = orig(self, *args, **kwargs)
  >     def fail(tr):
  >         raise error.Abort(b"forced transaction failure")
  >     # zzz prefix to ensure it sorts after store.write
  >     tr.addfinalize(b'zzz-forcefails', fail)
  >     return tr
  >
  > def uisetup(ui):
  >     extensions.wrapfunction(
  >         localrepo.localrepository, b'transaction', wrapper)
  >
  > cmdtable = {}
  >
  > EOF

Clean cached version
  $ rm -f "${extpath}c"
  $ rm -Rf "`dirname $extpath`/__pycache__"

  $ touch z
  $ hg ci -qAm z
  transaction abort!
  rollback completed
  abort: forced transaction failure
  [255]
  $ cat .hg/store/fncache
  data/y.i

Aborted transactions can be recovered later

  $ cat > ../exceptionext.py <<EOF
  > from __future__ import absolute_import
  > import os
  > from mercurial import (
  >   commands,
  >   error,
  >   extensions,
  >   localrepo,
  >   transaction,
  > )
  >
  > def trwrapper(orig, self, *args, **kwargs):
  >     tr = orig(self, *args, **kwargs)
  >     def fail(tr):
  >         raise error.Abort("forced transaction failure")
  >     # zzz prefix to ensure it sorts after store.write
  >     tr.addfinalize('zzz-forcefails', fail)
  >     return tr
  >
  > def abortwrapper(orig, self, *args, **kwargs):
  >     raise error.Abort("forced transaction failure")
  >
  > def uisetup(ui):
  >     extensions.wrapfunction(localrepo.localrepository, 'transaction',
  >                             trwrapper)
  >     extensions.wrapfunction(transaction.transaction, '_abort',
  >                             abortwrapper)
  >
  > cmdtable = {}
  >
  > EOF

Clean cached versions
  $ rm -f "${extpath}c"
  $ rm -Rf "`dirname $extpath`/__pycache__"

  $ hg up -q 1
  $ touch z
  $ hg ci -qAm z 2>/dev/null
  [255]
  $ cat .hg/store/fncache | sort
  data/y.i
  data/z.i
  $ hg recover
  rolling back interrupted transaction
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 1 changesets with 1 changes to 1 files
  $ cat .hg/store/fncache
  data/y.i

  $ cd ..

debugrebuildfncache does nothing unless repo has fncache requirement

  $ hg --config format.usefncache=false init nofncache
  $ cd nofncache
  $ hg debugrebuildfncache
  (not rebuilding fncache because repository does not support fncache)

  $ cd ..

debugrebuildfncache works on empty repository

  $ hg init empty
  $ cd empty
  $ hg debugrebuildfncache
  fncache already up to date
  $ cd ..

debugrebuildfncache on an up to date repository no-ops

  $ hg init repo
  $ cd repo
  $ echo initial > foo
  $ echo initial > .bar
  $ hg commit -A -m initial
  adding .bar
  adding foo

  $ cat .hg/store/fncache | sort
  data/.bar.i
  data/foo.i

  $ hg debugrebuildfncache
  fncache already up to date

debugrebuildfncache restores deleted fncache file

  $ rm -f .hg/store/fncache
  $ hg debugrebuildfncache
  adding data/.bar.i
  adding data/foo.i
  2 items added, 0 removed from fncache

  $ cat .hg/store/fncache | sort
  data/.bar.i
  data/foo.i

Rebuild after rebuild should no-op

  $ hg debugrebuildfncache
  fncache already up to date

A single missing file should get restored, an extra file should be removed

  $ cat > .hg/store/fncache << EOF
  > data/foo.i
  > data/bad-entry.i
  > EOF

  $ hg debugrebuildfncache
  removing data/bad-entry.i
  adding data/.bar.i
  1 items added, 1 removed from fncache

  $ cat .hg/store/fncache | sort
  data/.bar.i
  data/foo.i

  $ cd ..

Try a simple variation without dotencode to ensure fncache is ignorant of encoding

  $ hg --config format.dotencode=false init nodotencode
  $ cd nodotencode
  $ echo initial > foo
  $ echo initial > .bar
  $ hg commit -A -m initial
  adding .bar
  adding foo

  $ cat .hg/store/fncache | sort
  data/.bar.i
  data/foo.i

  $ rm .hg/store/fncache
  $ hg debugrebuildfncache
  adding data/.bar.i
  adding data/foo.i
  2 items added, 0 removed from fncache

  $ cat .hg/store/fncache | sort
  data/.bar.i
  data/foo.i

  $ cd ..

In repositories that have accumulated a large number of files over time, the
fncache file is going to be large. If we can possibly avoid loading it, so much
the better. The cache should not be loaded when committing changes to existing
files, or when unbundling changesets that only contain changes to existing
files:

  $ cat > fncacheloadwarn.py << EOF
  > from __future__ import absolute_import
  > from mercurial import extensions, localrepo
  >
  > def extsetup(ui):
  >     def wrapstore(orig, requirements, *args):
  >         store = orig(requirements, *args)
  >         if 'store' in requirements and 'fncache' in requirements:
  >             instrumentfncachestore(store, ui)
  >         return store
  >     extensions.wrapfunction(localrepo, 'makestore', wrapstore)
  >
  > def instrumentfncachestore(fncachestore, ui):
  >     class instrumentedfncache(type(fncachestore.fncache)):
  >         def _load(self):
  >             ui.warn('fncache load triggered!\n')
  >             super(instrumentedfncache, self)._load()
  >     fncachestore.fncache.__class__ = instrumentedfncache
  > EOF

  $ fncachextpath=`pwd`/fncacheloadwarn.py
  $ hg init nofncacheload
  $ cd nofncacheload
  $ printf "[extensions]\nfncacheloadwarn=$fncachextpath\n" >> .hg/hgrc

A new file should trigger a load, as we'd want to update the fncache set
in that case:

  $ touch foo
  $ hg ci -qAm foo
  fncache load triggered!

But modifying that file should not:

  $ echo bar >> foo
  $ hg ci -qm foo

If a transaction has been aborted, the zero-size truncated index file will
not prevent the fncache from being loaded; rather than actually abort
a transaction, we simulate the situation by creating a zero-size index file:

  $ touch .hg/store/data/bar.i
  $ touch bar
  $ hg ci -qAm bar
  fncache load triggered!

Unbundling should follow the same rules; existing files should not cause a load:

  $ hg clone -q . tobundle
  $ echo 'new line' > tobundle/bar
  $ hg -R tobundle ci -qm bar
  $ hg -R tobundle bundle -q barupdated.hg
  $ hg unbundle -q barupdated.hg

but adding new files should:

  $ touch tobundle/newfile
  $ hg -R tobundle ci -qAm newfile
  $ hg -R tobundle bundle -q newfile.hg
  $ hg unbundle -q newfile.hg
  fncache load triggered!

  $ cd ..