commit: move sorting of added and removed files list to lower level...
Martin von Zweigbergk
r42534:63101806 default
@@ -1,3180 +1,3180 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""
    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == '':
            return obj.vfs.join(fname)
        else:
            if location != 'store':
                raise error.ProgrammingError('unexpected location: %s' %
                                             location)
            return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

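# Illustrative sketch (not part of this changeset): a repository class would
# typically expose cached state by decorating a method with one of the
# filecache classes above, for example:
#
#     @repofilecache('bookmarks')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
# isfilecached() can then report whether such a property has already been
# computed without forcing its computation:
#
#     obj, cached = isfilecached(repo, 'changelog')
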
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

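# Illustrative sketch (not part of this changeset): unfilteredmethod is used
# as a decorator so that an operation always sees every revision, even when
# invoked through a filtered repoview, e.g.:
#
#     @unfilteredmethod
#     def destroying(self):
#         ...  # runs with repo.unfiltered() bound to `self`
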
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

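# Illustrative sketch (not part of this changeset): callers drive the
# executor through its context-manager protocol; for this local
# implementation the returned futures resolve eagerly:
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'heads', {})
#     heads = f.result()
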
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

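# Illustrative sketch (not part of this changeset): a localpeer is normally
# obtained from an existing repository object rather than constructed
# directly:
#
#     peer = repo.peer()          # returns localpeer(repo)
#     caps = peer.capabilities()
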
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

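# Illustrative sketch (not part of this changeset): an extension can
# advertise support for a custom requirement by registering a callback here,
# typically from its uisetup(); the names below are hypothetical:
#
#     def featuresetup(ui, features):
#         features.add(b'exp-myextension-storage')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
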
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

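    # Illustrative note (not part of this changeset): a typical .hg/requires
    # file for a repository in the modern default format lists one
    # requirement per line, e.g.:
    #
    #     dotencode
    #     fncache
    #     generaldelta
    #     revlogv1
    #     sparserevlog
    #     store
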
    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')


    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

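# Illustrative sketch (not part of this changeset): opening a repository
# through this factory, assuming a ui instance and a bytes path:
#
#     from mercurial import ui as uimod
#     repo = makelocalrepository(uimod.ui.load(), b'/path/to/repo')
#     print(repo.requirements)
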
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

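# Illustrative sketch (not part of this changeset): the usual way for an
# extension to customize per-repo config loading is to wrap this function;
# `_loadhgrc` below is a hypothetical name:
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         ...  # read additional config sources here
#         return loaded
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
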
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == 'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

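# Illustrative note (not part of this changeset): requirement sets map to
# store implementations roughly as follows:
#
#     {'store', 'fncache'} -> storemod.fncachestore (dotencode if required)
#     {'store'}            -> storemod.encodedstore
#     {}                   -> storemod.basicstore (very old repositories)
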
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

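# Illustrative note (not part of this changeset): for a repository with the
# common 'revlogv1' + 'generaldelta' + 'sparserevlog' requirements, the
# resolved options include entries along the lines of:
#
#     {b'revlogv1': True, b'generaldelta': True, b'sparse-revlog': True,
#      b'with-sparse-read': ..., b'maxchainlen': ..., ...}
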
753 def resolverevlogstorevfsoptions(ui, requirements, features):
753 def resolverevlogstorevfsoptions(ui, requirements, features):
754 """Resolve opener options specific to revlogs."""
754 """Resolve opener options specific to revlogs."""
755
755
756 options = {}
756 options = {}
757 options[b'flagprocessors'] = {}
757 options[b'flagprocessors'] = {}
758
758
759 if b'revlogv1' in requirements:
759 if b'revlogv1' in requirements:
760 options[b'revlogv1'] = True
760 options[b'revlogv1'] = True
761 if REVLOGV2_REQUIREMENT in requirements:
761 if REVLOGV2_REQUIREMENT in requirements:
762 options[b'revlogv2'] = True
762 options[b'revlogv2'] = True
763
763
764 if b'generaldelta' in requirements:
764 if b'generaldelta' in requirements:
765 options[b'generaldelta'] = True
765 options[b'generaldelta'] = True
766
766
767 # experimental config: format.chunkcachesize
767 # experimental config: format.chunkcachesize
768 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
768 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
769 if chunkcachesize is not None:
769 if chunkcachesize is not None:
770 options[b'chunkcachesize'] = chunkcachesize
770 options[b'chunkcachesize'] = chunkcachesize
771
771
772 deltabothparents = ui.configbool(b'storage',
772 deltabothparents = ui.configbool(b'storage',
773 b'revlog.optimize-delta-parent-choice')
773 b'revlog.optimize-delta-parent-choice')
774 options[b'deltabothparents'] = deltabothparents
774 options[b'deltabothparents'] = deltabothparents
775
775
776 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
776 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
777 lazydeltabase = False
777 lazydeltabase = False
778 if lazydelta:
778 if lazydelta:
779 lazydeltabase = ui.configbool(b'storage',
779 lazydeltabase = ui.configbool(b'storage',
780 b'revlog.reuse-external-delta-parent')
780 b'revlog.reuse-external-delta-parent')
781 if lazydeltabase is None:
781 if lazydeltabase is None:
782 lazydeltabase = not scmutil.gddeltaconfig(ui)
782 lazydeltabase = not scmutil.gddeltaconfig(ui)
783 options[b'lazydelta'] = lazydelta
783 options[b'lazydelta'] = lazydelta
784 options[b'lazydeltabase'] = lazydeltabase
784 options[b'lazydeltabase'] = lazydeltabase
785
785
786 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
786 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
787 if 0 <= chainspan:
787 if 0 <= chainspan:
788 options[b'maxdeltachainspan'] = chainspan
788 options[b'maxdeltachainspan'] = chainspan
789
789
790 mmapindexthreshold = ui.configbytes(b'experimental',
790 mmapindexthreshold = ui.configbytes(b'experimental',
791 b'mmapindexthreshold')
791 b'mmapindexthreshold')
792 if mmapindexthreshold is not None:
792 if mmapindexthreshold is not None:
793 options[b'mmapindexthreshold'] = mmapindexthreshold
793 options[b'mmapindexthreshold'] = mmapindexthreshold
794
794
    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix('revlog-compression-') or prefix('exp-compression-'):
            options[b'compengine'] = r.split('-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

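# A rough illustration (hedged) of the requirement parsing above: with a
# maxsplit of 2, everything after the second dash names the engine, e.g.
#
#   >>> 'revlog-compression-zstd'.split('-', 2)[2]
#   'zstd'
#   >>> 'exp-compression-none'.split('-', 2)[2]
#   'none'
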
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

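# Rough usage sketch (hedged): the factory returns a *class*, not an
# instance; ``makelocalrepository()`` mixes it into the final repo type.
#
#   cls = makefilestorage(requirements={repository.NARROW_REQUIREMENT},
#                         features=set())
#   # cls is revlognarrowfilestorage here; without the narrow requirement
#   # it would be revlogfilestorage.
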
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

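# Hedged sketch of the iterative type derivation mentioned above; the real
# logic lives in ``makelocalrepository()`` and passes additional keyword
# arguments, but the shape is roughly:
#
#   bases = []
#   for iface, fn in REPO_INTERFACES:
#       factory = fn()  # unwrap the lambda so wrapped functions are seen
#       bases.append(factory(requirements=requirements, features=features))
#   cls = type(r'derivedrepo', tuple(bases), {})
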
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs
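
    # Design note (hedged): the ward holds the repository only through a
    # weak reference, e.g.
    #
    #   rref = weakref.ref(self)
    #   ...
    #   repo = rref()  # may be None once the repo has been collected
    #
    # so wrapping vfs.audit does not create a cycle that would keep the
    # repository object alive.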

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with a reference
    # cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and '%' not in name:
            name = name + '%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
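
    # Rough usage sketch (hedged; view names are defined by repoview):
    #
    #   repo.filtered('visible')  # hide obsolescence-hidden changesets
    #   repo.filtered('served')   # additionally hide secret changesets
    #   repo.unfiltered()         # raw, unfiltered access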

    @mixedrepostorecache(('bookmarks', ''), ('bookmarks.current', ''),
                         ('bookmarks', 'store'))
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore,
                                    self._storenarrowmatch)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
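
    # Rough usage sketch (hedged): intersect a user-supplied matcher with
    # the narrowspec before walking files, e.g.
    #
    #   m = repo.narrowmatch(usermatch)
    #   if m(b'path/inside/narrowspec'):
    #       ...  # requested by the user *and* within the narrow clone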

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)
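
    # Rough usage sketch (hedged) of the accepted changeid forms:
    #
    #   repo[None]    # working directory context
    #   repo[0]       # integer revision number
    #   repo['tip']   # also 'null', '.', a 20-byte node, or 40 hex digits
    #
    # Anything else raises ProgrammingError; unknown revisions raise
    # RepoLookupError.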

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
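
    # Rough usage sketch (hedged): %-formatting escapes values safely, e.g.
    #
    #   for rev in repo.revs('ancestors(%d) and not public()', 42):
    #       ...
    #   for ctx in repo.set('heads(%ld)', [5, 7, 11]):
    #       ...
    #
    # where %d takes a revision number and %ld a list of them; see
    # revsetlang.formatspec for the full table.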

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
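
    # Rough usage sketch (hedged):
    #
    #   repo.branchtip('default')                      # tip node, or raises
    #   repo.branchtip('no-such', ignoremissing=True)  # returns None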

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_("unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, since it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
1673
1673
    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

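    # Illustrative sketch (not part of this module): the patterns loaded
    # above come from hgrc sections named after the filter, e.g. (assuming
    # a Unix host with dos2unix/unix2dos installed):
    #
    #   [encode]
    #   **.txt = dos2unix
    #
    #   [decode]
    #   **.txt = unix2dos
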
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

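    # Illustrative sketch (not part of this module): the 'l' and 'x'
    # entries in ``flags`` select symlink and executable handling, e.g.
    #
    #   repo.wwrite('script.sh', b'#!/bin/sh\n', 'x')  # executable file
    #   repo.wwrite('link', b'target', 'l')            # symlink to 'target'
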
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with a performance impact. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new or changed or deleted tags). In addition the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
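        #
        # As an illustrative sketch (not part of the documented interface),
        # a shell hook consuming this file might look like:
        #
        #   [hooks]
        #   txnclose.tag-report = test "$HG_TAG_MOVED" != 1 || \
        #       cat "$(hg root)/.hg/changes/tags.changes"
        #
        # (assumes a POSIX shell; the hook name 'tag-report' is arbitrary)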
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        tr.hookargs['txnname'] = desc
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

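    # Illustrative usage sketch (an assumption about typical callers, not
    # part of this module): a transaction is opened under the store lock
    # and aborts automatically if an exception escapes:
    #
    #   with repo.lock():
    #       with repo.transaction('my-change') as tr:
    #           ...  # write store data; closes on normal exit
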
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.svfs, 'journal.narrowspec'),
                (self.vfs, 'journal.narrowspec.dirstate'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (bookmarks.bookmarksvfs(self), 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write("journal.bookmarks",
                           bookmarksvfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

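    # Illustrative note (not part of this module): this is the code path
    # behind 'hg recover', run when an interrupted transaction left a
    # 'journal' file behind, e.g.
    #
    #   $ hg recover
    #   rolling back interrupted transaction
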
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists('undo.bookmarks'):
            bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

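    # Illustrative note (not part of this module): this backs the
    # 'hg rollback' command, undoing the last transaction by restoring
    # the 'undo.*' files derived from the journal written above.
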
    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            self.filtered('served').branchmap()
            self.filtered('served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered('served').tags()

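    # Illustrative note (an assumption about the debug command, not
    # verified here): 'hg debugupdatecaches' reaches this method with
    # full=True, leaving branchmap, revbranchcache and tag caches warm.
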
    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(vfs=self.svfs,
                       lockname="lock",
                       wait=wait,
                       releasefn=None,
                       acquirefn=self.invalidate,
                       desc=_('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

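    # Illustrative sketch (grounded in the docstrings above, not a new
    # rule): when both locks are needed, take wlock first, then lock:
    #
    #   with repo.wlock(), repo.lock():
    #       ...  # safe to mutate both working copy and store
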
    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

2357 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
2357 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
2358 includecopymeta):
2358 includecopymeta):
2359 """
2359 """
2360 commit an individual file as part of a larger transaction
2360 commit an individual file as part of a larger transaction
2361 """
2361 """
2362
2362
2363 fname = fctx.path()
2363 fname = fctx.path()
2364 fparent1 = manifest1.get(fname, nullid)
2364 fparent1 = manifest1.get(fname, nullid)
2365 fparent2 = manifest2.get(fname, nullid)
2365 fparent2 = manifest2.get(fname, nullid)
2366 if isinstance(fctx, context.filectx):
2366 if isinstance(fctx, context.filectx):
2367 node = fctx.filenode()
2367 node = fctx.filenode()
2368 if node in [fparent1, fparent2]:
2368 if node in [fparent1, fparent2]:
2369 self.ui.debug('reusing %s filelog entry\n' % fname)
2369 self.ui.debug('reusing %s filelog entry\n' % fname)
2370 if ((fparent1 != nullid and
2370 if ((fparent1 != nullid and
2371 manifest1.flags(fname) != fctx.flags()) or
2371 manifest1.flags(fname) != fctx.flags()) or
2372 (fparent2 != nullid and
2372 (fparent2 != nullid and
2373 manifest2.flags(fname) != fctx.flags())):
2373 manifest2.flags(fname) != fctx.flags())):
2374 changelist.append(fname)
2374 changelist.append(fname)
2375 return node
2375 return node
2376
2376
2377 flog = self.file(fname)
2377 flog = self.file(fname)
2378 meta = {}
2378 meta = {}
2379 cfname = fctx.copysource()
2379 cfname = fctx.copysource()
2380 if cfname and cfname != fname:
2380 if cfname and cfname != fname:
2381 # Mark the new revision of this file as a copy of another
2381 # Mark the new revision of this file as a copy of another
2382 # file. This copy data will effectively act as a parent
2382 # file. This copy data will effectively act as a parent
2383 # of this new revision. If this is a merge, the first
2383 # of this new revision. If this is a merge, the first
2384 # parent will be the nullid (meaning "look up the copy data")
2384 # parent will be the nullid (meaning "look up the copy data")
2385 # and the second one will be the other parent. For example:
2385 # and the second one will be the other parent. For example:
2386 #
2386 #
2387 # 0 --- 1 --- 3 rev1 changes file foo
2387 # 0 --- 1 --- 3 rev1 changes file foo
2388 # \ / rev2 renames foo to bar and changes it
2388 # \ / rev2 renames foo to bar and changes it
2389 # \- 2 -/ rev3 should have bar with all changes and
2389 # \- 2 -/ rev3 should have bar with all changes and
2390 # should record that bar descends from
2390 # should record that bar descends from
2391 # bar in rev2 and foo in rev1
2391 # bar in rev2 and foo in rev1
2392 #
2392 #
2393 # this allows this merge to succeed:
2393 # this allows this merge to succeed:
2394 #
2395 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2396 # \ / merging rev3 and rev4 should use bar@rev2
2397 # \- 2 --- 4 as the merge base
2398 #
2399
2400 cnode = manifest1.get(cfname)
2401 newfparent = fparent2
2402
2403 if manifest2: # branch merge
2404 if fparent2 == nullid or cnode is None: # copied on remote side
2405 if cfname in manifest2:
2406 cnode = manifest2[cfname]
2407 newfparent = fparent1
2408
2409 # Here, we used to search backwards through history to try to find
2410 # where the file copy came from if the source of a copy was not in
2411 # the parent directory. However, this doesn't actually make sense to
2412 # do (what does a copy from something not in your working copy even
2413 # mean?) and it causes bugs (e.g., issue4476). Instead, we will warn
2414 # the user that copy information was dropped, so if they didn't
2415 # expect this outcome it can be fixed, but this is the correct
2416 # behavior in this circumstance.
2417
2418 if cnode:
2419 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
2420 if includecopymeta:
2421 meta["copy"] = cfname
2422 meta["copyrev"] = hex(cnode)
2423 fparent1, fparent2 = nullid, newfparent
2424 else:
2425 self.ui.warn(_("warning: can't find ancestor for '%s' "
2426 "copied from '%s'!\n") % (fname, cfname))
2427
2428 elif fparent1 == nullid:
2429 fparent1, fparent2 = fparent2, nullid
2430 elif fparent2 != nullid:
2431 # is one parent an ancestor of the other?
2432 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2433 if fparent1 in fparentancestors:
2434 fparent1, fparent2 = fparent2, nullid
2435 elif fparent2 in fparentancestors:
2436 fparent2 = nullid
2437
2438 # is the file changed?
2439 text = fctx.data()
2440 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2441 changelist.append(fname)
2442 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2443 # are just the flags changed during merge?
2444 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2445 changelist.append(fname)
2446
2447 return fparent1
2448
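# Editor's illustrative sketch (not Mercurial API): the parent-reduction
# rule used above, in miniature. When one candidate file parent is an
# ancestor of the other, the descendant alone carries the history, so the
# ancestor is dropped and the revision ends up with a single parent.
# 'ancestorheads' stands in for flog.commonancestorsheads(fp1, fp2).
NULLID = b'\0' * 20

def reducefileparents(fp1, fp2, ancestorheads):
    if fp1 in ancestorheads:
        return fp2, NULLID    # fp1 is an ancestor of fp2: keep fp2 only
    if fp2 in ancestorheads:
        return fp1, NULLID    # fp2 is an ancestor of fp1: keep fp1 only
    return fp1, fp2           # a genuine merge: keep both parents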
2449 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2450 """check for commit arguments that aren't committable"""
2451 if match.isexact() or match.prefix():
2452 matched = set(status.modified + status.added + status.removed)
2453
2454 for f in match.files():
2455 f = self.dirstate.normalize(f)
2456 if f == '.' or f in matched or f in wctx.substate:
2457 continue
2458 if f in status.deleted:
2459 fail(f, _('file not found!'))
2460 if f in vdirs: # visited directory
2461 d = f + '/'
2462 for mf in matched:
2463 if mf.startswith(d):
2464 break
2465 else:
2466 fail(f, _("no match under directory!"))
2467 elif f not in self.dirstate:
2468 fail(f, _("file not tracked!"))
2469
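# Editor's sketch of the rule enforced above, restated on plain containers
# (hypothetical stand-ins, not the dirstate API): an explicitly named path
# must be part of the status, a subrepo, or a visited directory containing
# a match; anything else is reported through fail() and aborts the commit.
def explicitpathcommittable(f, matched, substate, vdirs):
    if f == '.' or f in matched or f in substate:
        return True
    if f in vdirs:
        # a directory is fine as long as something beneath it matched
        return any(mf.startswith(f + '/') for mf in matched)
    return False    # deleted or untracked paths are rejected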
2470 @unfilteredmethod
2471 def commit(self, text="", user=None, date=None, match=None, force=False,
2472 editor=False, extra=None):
2473 """Add a new revision to current repository.
2474
2475 Revision information is gathered from the working directory,
2476 match can be used to filter the committed files. If editor is
2477 supplied, it is called to get a commit message.
2478 """
2479 if extra is None:
2480 extra = {}
2481
2482 def fail(f, msg):
2483 raise error.Abort('%s: %s' % (f, msg))
2484
2485 if not match:
2486 match = matchmod.always()
2487
2488 if not force:
2489 vdirs = []
2490 match.explicitdir = vdirs.append
2491 match.bad = fail
2492
2493 # lock() for recent changelog (see issue4368)
2494 with self.wlock(), self.lock():
2495 wctx = self[None]
2496 merge = len(wctx.parents()) > 1
2497
2498 if not force and merge and not match.always():
2499 raise error.Abort(_('cannot partially commit a merge '
2500 '(do not specify files or patterns)'))
2501
2502 status = self.status(match=match, clean=force)
2503 if force:
2504 status.modified.extend(status.clean) # mq may commit clean files
2505
2506 # check subrepos
2507 subs, commitsubs, newstate = subrepoutil.precommit(
2508 self.ui, wctx, status, match, force=force)
2509
2510 # make sure all explicit patterns are matched
2511 if not force:
2512 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2513
2514 cctx = context.workingcommitctx(self, status,
2515 text, user, date, extra)
2516
2517 # internal config: ui.allowemptycommit
2518 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2519 or extra.get('close') or merge or cctx.files()
2520 or self.ui.configbool('ui', 'allowemptycommit'))
2521 if not allowemptycommit:
2522 return None
2523
2524 if merge and cctx.deleted():
2525 raise error.Abort(_("cannot commit merge with missing files"))
2526
2527 ms = mergemod.mergestate.read(self)
2528 mergeutil.checkunresolved(ms)
2529
2530 if editor:
2531 cctx._text = editor(self, cctx, subs)
2532 edited = (text != cctx._text)
2533
2534 # Save commit message in case this transaction gets rolled back
2535 # (e.g. by a pretxncommit hook). Leave the content alone on
2536 # the assumption that the user will use the same editor again.
2537 msgfn = self.savecommitmessage(cctx._text)
2538
2539 # commit subs and write new state
2540 if subs:
2541 uipathfn = scmutil.getuipathfn(self)
2542 for s in sorted(commitsubs):
2543 sub = wctx.sub(s)
2544 self.ui.status(_('committing subrepository %s\n') %
2545 uipathfn(subrepoutil.subrelpath(sub)))
2546 sr = sub.commit(cctx._text, user, date)
2547 newstate[s] = (newstate[s][0], sr)
2548 subrepoutil.writestate(self, newstate)
2549
2550 p1, p2 = self.dirstate.parents()
2551 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2552 try:
2553 self.hook("precommit", throw=True, parent1=hookp1,
2554 parent2=hookp2)
2555 with self.transaction('commit'):
2556 ret = self.commitctx(cctx, True)
2557 # update bookmarks, dirstate and mergestate
2558 bookmarks.update(self, [p1, p2], ret)
2559 cctx.markcommitted(ret)
2560 ms.reset()
2561 except: # re-raises
2562 if edited:
2563 self.ui.write(
2564 _('note: commit message saved in %s\n') % msgfn)
2565 raise
2566
2567 def commithook():
2568 # hack for commands that use a temporary commit (e.g. histedit):
2569 # the temporary commit may have been stripped before the hook runs
2570 if self.changelog.hasnode(ret):
2571 self.hook("commit", node=hex(ret), parent1=hookp1,
2572 parent2=hookp2)
2573 self._afterlock(commithook)
2574 return ret
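# Editor's usage sketch (hedged): committing all working-directory changes
# on an existing localrepository instance 'repo' (an assumption here).
from mercurial import match as matchmod

def commiteverything(repo, message, who):
    m = matchmod.always()    # no file filtering
    node = repo.commit(text=message, user=who, match=m)
    # commit() returns None when there was nothing to commit and
    # ui.allowemptycommit is not set
    return node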
2575
2576 @unfilteredmethod
2577 def commitctx(self, ctx, error=False):
2578 """Add a new revision to current repository.
2579 Revision information is passed via the context argument.
2580
2581 ctx.files() should list all files involved in this commit, i.e.
2582 modified/added/removed files. On merge, it may be wider than the
2583 set of files actually committed, since any file nodes derived directly
2584 from p1 or p2 are excluded from the committed ctx.files().
2585 """
2586
2587 p1, p2 = ctx.p1(), ctx.p2()
2588 user = ctx.user()
2589
2590 writecopiesto = self.ui.config('experimental', 'copies.write-to')
2591 writefilecopymeta = writecopiesto != 'changeset-only'
2592 p1copies, p2copies = None, None
2593 if writecopiesto in ('changeset-only', 'compatibility'):
2594 p1copies = ctx.p1copies()
2595 p2copies = ctx.p2copies()
2596 with self.lock(), self.transaction("commit") as tr:
2597 trp = weakref.proxy(tr)
2598
2599 if ctx.manifestnode():
2600 # reuse an existing manifest revision
2601 self.ui.debug('reusing known manifest\n')
2602 mn = ctx.manifestnode()
2603 files = ctx.files()
2604 elif ctx.files():
2605 m1ctx = p1.manifestctx()
2606 m2ctx = p2.manifestctx()
2607 mctx = m1ctx.copy()
2608
2609 m = mctx.read()
2610 m1 = m1ctx.read()
2611 m2 = m2ctx.read()
2612
2613 # check in files
2614 added = []
2615 changed = []
2616 removed = list(ctx.removed())
2617 linkrev = len(self)
2618 self.ui.note(_("committing files:\n"))
2619 uipathfn = scmutil.getuipathfn(self)
2620 for f in sorted(ctx.modified() + ctx.added()):
2621 self.ui.note(uipathfn(f) + "\n")
2622 try:
2623 fctx = ctx[f]
2624 if fctx is None:
2625 removed.append(f)
2626 else:
2627 added.append(f)
2628 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2629 trp, changed,
2630 writefilecopymeta)
2631 m.setflag(f, fctx.flags())
2632 except OSError:
2633 self.ui.warn(_("trouble committing %s!\n") %
2634 uipathfn(f))
2635 raise
2636 except IOError as inst:
2637 errcode = getattr(inst, 'errno', errno.ENOENT)
2638 if error or errcode and errcode != errno.ENOENT:
2639 self.ui.warn(_("trouble committing %s!\n") %
2640 uipathfn(f))
2641 raise
2642
2643 # update manifest
2644 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2644 removed = [f for f in removed if f in m1 or f in m2]
2645 drop = [f for f in removed if f in m]
2645 drop = sorted([f for f in removed if f in m])
2646 for f in drop:
2647 del m[f]
2648 files = changed + removed
2649 md = None
2650 if not files:
2651 # if no "files" actually changed in terms of the changelog,
2652 # try hard to detect unmodified manifest entry so that the
2653 # exact same commit can be reproduced later on convert.
2654 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2655 if not files and md:
2656 self.ui.debug('not reusing manifest (no file change in '
2657 'changelog, but manifest differs)\n')
2658 if files or md:
2659 self.ui.note(_("committing manifest\n"))
2660 # we're using narrowmatch here since it's already applied at
2661 # other stages (such as dirstate.walk), so we're already
2662 # ignoring things outside of narrowspec in most cases. The
2663 # one case where we might have files outside the narrowspec
2664 # at this point is merges, and we already error out in the
2665 # case where the merge has files outside of the narrowspec,
2666 # so this is safe.
2667 mn = mctx.write(trp, linkrev,
2668 p1.manifestnode(), p2.manifestnode(),
2669 added, drop, match=self.narrowmatch())
2670 else:
2671 self.ui.debug('reusing manifest from p1 (listed files '
2672 'actually unchanged)\n')
2673 mn = p1.manifestnode()
2674 else:
2675 self.ui.debug('reusing manifest from p1 (no file change)\n')
2676 mn = p1.manifestnode()
2677 files = []
2678
2679 if writecopiesto == 'changeset-only':
2680 # If writing only to changeset extras, use None to indicate that
2681 # no entry should be written. If writing to both, write an empty
2682 # entry to prevent the reader from falling back to reading
2683 # filelogs.
2684 p1copies = p1copies or None
2685 p2copies = p2copies or None
2686
2687 # update changelog
2688 self.ui.note(_("committing changelog\n"))
2689 self.changelog.delayupdate(tr)
2690 n = self.changelog.add(mn, files, ctx.description(),
2691 trp, p1.node(), p2.node(),
2692 user, ctx.date(), ctx.extra().copy(),
2693 p1copies, p2copies)
2694 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2695 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2696 parent2=xp2)
2697 # set the new commit to the proper phase
2698 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2699 if targetphase:
2700 # retracting the boundary does not alter parent changesets;
2701 # if a parent has a higher phase, the resulting phase will
2702 # be compliant anyway
2703 #
2704 # if the minimal phase was 0, we don't need to retract anything
2705 phases.registernew(self, tr, targetphase, [n])
2706 return n
2707
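# Editor's restatement (illustrative, not hg API) of the
# experimental.copies.write-to handling inside commitctx() above: the
# config decides whether copy metadata lands in filelog entries, in
# changeset extras, or in both.
def copymetadataplan(writecopiesto, p1copies, p2copies):
    tofilelog = writecopiesto != 'changeset-only'
    tochangeset = writecopiesto in ('changeset-only', 'compatibility')
    if not tochangeset:
        p1copies = p2copies = None
    elif writecopiesto == 'changeset-only':
        # None (rather than an empty dict) means "write no entry at all";
        # in 'compatibility' mode an empty entry is kept so readers do not
        # fall back to the filelogs.
        p1copies = p1copies or None
        p2copies = p2copies or None
    return tofilelog, p1copies, p2copies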
2708 @unfilteredmethod
2709 def destroying(self):
2710 '''Inform the repository that nodes are about to be destroyed.
2711 Intended for use by strip and rollback, so there's a common
2712 place for anything that has to be done before destroying history.
2713
2714 This is mostly useful for saving state that is in memory and waiting
2715 to be flushed when the current lock is released. Because a call to
2716 destroyed is imminent, the repo will be invalidated, causing those
2717 changes to stay in memory (waiting for the next unlock), or vanish
2718 completely.
2719 '''
2720 # When using the same lock to commit and strip, the phasecache is left
2721 # dirty after committing. Then when we strip, the repo is invalidated,
2722 # causing those changes to disappear.
2723 if '_phasecache' in vars(self):
2724 self._phasecache.write()
2725
2726 @unfilteredmethod
2727 def destroyed(self):
2728 '''Inform the repository that nodes have been destroyed.
2729 Intended for use by strip and rollback, so there's a common
2730 place for anything that has to be done after destroying history.
2731 '''
2732 # When one tries to:
2733 # 1) destroy nodes thus calling this method (e.g. strip)
2734 # 2) use phasecache somewhere (e.g. commit)
2735 #
2736 # then 2) will fail because the phasecache contains nodes that were
2737 # removed. We can either remove phasecache from the filecache,
2738 # causing it to reload next time it is accessed, or simply filter
2739 # the removed nodes now and write the updated cache.
2740 self._phasecache.filterunknown(self)
2741 self._phasecache.write()
2742
2743 # refresh all repository caches
2744 self.updatecaches()
2745
2746 # Ensure the persistent tag cache is updated. Doing it now
2747 # means that the tag cache only has to worry about destroyed
2748 # heads immediately after a strip/rollback. That in turn
2749 # guarantees that "cachetip == currenttip" (comparing both rev
2750 # and node) always means no nodes have been added or destroyed.
2751
2752 # XXX this is suboptimal when qrefresh'ing: we strip the current
2753 # head, refresh the tag cache, then immediately add a new head.
2754 # But I think doing it this way is necessary for the "instant
2755 # tag cache retrieval" case to work.
2756 self.invalidate()
2757
2758 def status(self, node1='.', node2=None, match=None,
2759 ignored=False, clean=False, unknown=False,
2760 listsubrepos=False):
2761 '''a convenience method that calls node1.status(node2)'''
2762 return self[node1].status(node2, match, ignored, clean, unknown,
2763 listsubrepos)
2764
2765 def addpostdsstatus(self, ps):
2766 """Add a callback to run within the wlock, at the point at which status
2767 fixups happen.
2768
2769 On status completion, callback(wctx, status) will be called with the
2770 wlock held, unless the dirstate has changed from underneath or the wlock
2771 couldn't be grabbed.
2772
2773 Callbacks should not capture and use a cached copy of the dirstate --
2774 it might change in the meanwhile. Instead, they should access the
2775 dirstate via wctx.repo().dirstate.
2776
2777 This list is emptied out after each status run -- extensions should
2778 make sure they add to this list each time dirstate.status is called.
2779 Extensions should also make sure they don't call this for statuses
2780 that don't involve the dirstate.
2781 """
2782
2783 # The list is located here for uniqueness reasons -- it is actually
2784 # managed by the workingctx, but that isn't unique per-repo.
2785 self._postdsstatus.append(ps)
2786
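# Editor's hedged sketch: an extension registering a post-dirstate-status
# callback following the contract documented above (the function name is
# hypothetical; the (wctx, status) signature is the documented one).
def _poststatusfixup(wctx, status):
    repo = wctx.repo()    # always go through wctx, never a cached dirstate
    repo.ui.debug('post-status: %d modified, %d added\n'
                  % (len(status.modified), len(status.added)))

# registered once per status run, e.g. from a wrapped dirstate.status:
# repo.addpostdsstatus(_poststatusfixup)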
2787 def postdsstatus(self):
2788 """Used by workingctx to get the list of post-dirstate-status hooks."""
2789 return self._postdsstatus
2790
2791 def clearpostdsstatus(self):
2792 """Used by workingctx to clear post-dirstate-status hooks."""
2793 del self._postdsstatus[:]
2794
2795 def heads(self, start=None):
2796 if start is None:
2797 cl = self.changelog
2798 headrevs = reversed(cl.headrevs())
2799 return [cl.node(rev) for rev in headrevs]
2800
2801 heads = self.changelog.heads(start)
2802 # sort the output in rev descending order
2803 return sorted(heads, key=self.changelog.rev, reverse=True)
2804
2805 def branchheads(self, branch=None, start=None, closed=False):
2806 '''return a (possibly filtered) list of heads for the given branch
2807
2808 Heads are returned in topological order, from newest to oldest.
2809 If branch is None, use the dirstate branch.
2810 If start is not None, return only heads reachable from start.
2811 If closed is True, return heads that are marked as closed as well.
2812 '''
2813 if branch is None:
2814 branch = self[None].branch()
2815 branches = self.branchmap()
2816 if not branches.hasbranch(branch):
2817 return []
2818 # the cache returns heads ordered lowest to highest
2819 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2820 if start is not None:
2821 # filter out the heads that cannot be reached from startrev
2822 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2823 bheads = [h for h in bheads if h in fbheads]
2824 return bheads
2825
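# Editor's usage sketch (hedged; 'repo' and the branch name are example
# assumptions): heads come back newest-first, so the first element is the
# most recent head of the branch.
heads = repo.branchheads('default', closed=True)
if heads:
    newest = heads[0]
    repo.ui.write('%d heads, tip-most is %s\n'
                  % (len(heads), repo[newest].hex()[:12]))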
2826 def branches(self, nodes):
2827 if not nodes:
2828 nodes = [self.changelog.tip()]
2829 b = []
2830 for n in nodes:
2831 t = n
2832 while True:
2833 p = self.changelog.parents(n)
2834 if p[1] != nullid or p[0] == nullid:
2835 b.append((t, n, p[0], p[1]))
2836 break
2837 n = p[0]
2838 return b
2839
2840 def between(self, pairs):
2841 r = []
2842
2843 for top, bottom in pairs:
2844 n, l, i = top, [], 0
2845 f = 1
2846
2847 while n != bottom and n != nullid:
2848 p = self.changelog.parents(n)[0]
2849 if i == f:
2850 l.append(n)
2851 f = f * 2
2852 n = p
2853 i += 1
2854
2855 r.append(l)
2856
2857 return r
2858
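# Editor's note: for each (top, bottom) pair the loop above records the
# ancestors lying at exponentially growing distances 1, 2, 4, 8, ... from
# top, which is the sampling the legacy 'between' wire-protocol command
# expects. A minimal sketch of just the distance schedule:
def sampleddistances(limit):
    d, out = 1, []
    while d <= limit:
        out.append(d)
        d *= 2
    return out    # sampleddistances(10) -> [1, 2, 4, 8]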
2859 def checkpush(self, pushop):
2860 """Extensions can override this function if additional checks have
2861 to be performed before pushing, or call it if they override the push
2862 command.
2863 """
2864
2865 @unfilteredpropertycache
2866 def prepushoutgoinghooks(self):
2867 """Return util.hooks consisting of callbacks that receive a pushop
2868 (with repo, remote, and outgoing attributes) before pushing changesets.
2869 """
2870 return util.hooks()
2871
2872 def pushkey(self, namespace, key, old, new):
2873 try:
2874 tr = self.currenttransaction()
2875 hookargs = {}
2876 if tr is not None:
2877 hookargs.update(tr.hookargs)
2878 hookargs = pycompat.strkwargs(hookargs)
2879 hookargs[r'namespace'] = namespace
2880 hookargs[r'key'] = key
2881 hookargs[r'old'] = old
2882 hookargs[r'new'] = new
2883 self.hook('prepushkey', throw=True, **hookargs)
2884 except error.HookAbort as exc:
2885 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2886 if exc.hint:
2887 self.ui.write_err(_("(%s)\n") % exc.hint)
2888 return False
2889 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2890 ret = pushkey.push(self, namespace, key, old, new)
2891 def runhook():
2892 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2893 ret=ret)
2894 self._afterlock(runhook)
2895 return ret
2896
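# Editor's usage sketch (hedged): moving a bookmark through the pushkey
# namespace; the bookmark name and hex nodes are placeholders. A False
# return means a hook aborted or the old value no longer matched.
ok = repo.pushkey('bookmarks', 'feature-x', oldhexnode, newhexnode)
if not ok:
    repo.ui.warn('bookmark move was refused\n')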
2897 def listkeys(self, namespace):
2898 self.hook('prelistkeys', throw=True, namespace=namespace)
2899 self.ui.debug('listing keys for "%s"\n' % namespace)
2900 values = pushkey.list(self, namespace)
2901 self.hook('listkeys', namespace=namespace, values=values)
2902 return values
2903
2904 def debugwireargs(self, one, two, three=None, four=None, five=None):
2905 '''used to test argument passing over the wire'''
2906 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2907 pycompat.bytestr(four),
2908 pycompat.bytestr(five))
2909
2910 def savecommitmessage(self, text):
2911 fp = self.vfs('last-message.txt', 'wb')
2912 try:
2913 fp.write(text)
2914 finally:
2915 fp.close()
2916 return self.pathto(fp.name[len(self.root) + 1:])
2917
2918 # used to avoid circular references so destructors work
2919 def aftertrans(files):
2920 renamefiles = [tuple(t) for t in files]
2921 def a():
2922 for vfs, src, dest in renamefiles:
2923 # if src and dest refer to the same file, vfs.rename is a no-op,
2924 # leaving both src and dest on disk. delete dest to make sure
2925 # the rename cannot be such a no-op.
2926 vfs.tryunlink(dest)
2927 try:
2928 vfs.rename(src, dest)
2929 except OSError: # journal file does not yet exist
2930 pass
2931 return a
2932
2933 def undoname(fn):
2934 base, name = os.path.split(fn)
2935 assert name.startswith('journal')
2936 return os.path.join(base, name.replace('journal', 'undo', 1))
2937
2938 def instance(ui, path, create, intents=None, createopts=None):
2939 localpath = util.urllocalpath(path)
2940 if create:
2941 createrepository(ui, localpath, createopts=createopts)
2942
2943 return makelocalrepository(ui, localpath, intents=intents)
2944
2945 def islocal(path):
2946 return True
2947
2948 def defaultcreateopts(ui, createopts=None):
2949 """Populate the default creation options for a repository.
2950
2951 A dictionary of explicitly requested creation options can be passed
2952 in. Missing keys will be populated.
2953 """
2954 createopts = dict(createopts or {})
2955
2956 if 'backend' not in createopts:
2957 # experimental config: storage.new-repo-backend
2958 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2959
2960 return createopts
2961
2962 def newreporequirements(ui, createopts):
2963 """Determine the set of requirements for a new local repository.
2964
2965 Extensions can wrap this function to specify custom requirements for
2966 new repositories.
2967 """
2968 # If the repo is being created from a shared repository, we copy
2969 # its requirements.
2970 if 'sharedrepo' in createopts:
2971 requirements = set(createopts['sharedrepo'].requirements)
2972 if createopts.get('sharedrelative'):
2973 requirements.add('relshared')
2974 else:
2975 requirements.add('shared')
2976
2977 return requirements
2978
2979 if 'backend' not in createopts:
2980 raise error.ProgrammingError('backend key not present in createopts; '
2981 'was defaultcreateopts() called?')
2982
2983 if createopts['backend'] != 'revlogv1':
2984 raise error.Abort(_('unable to determine repository requirements for '
2985 'storage backend: %s') % createopts['backend'])
2986
2987 requirements = {'revlogv1'}
2988 if ui.configbool('format', 'usestore'):
2989 requirements.add('store')
2990 if ui.configbool('format', 'usefncache'):
2991 requirements.add('fncache')
2992 if ui.configbool('format', 'dotencode'):
2993 requirements.add('dotencode')
2994
2995 compengine = ui.config('format', 'revlog-compression')
2996 if compengine not in util.compengines:
2997 raise error.Abort(_('compression engine %s defined by '
2998 'format.revlog-compression not available') %
2999 compengine,
3000 hint=_('run "hg debuginstall" to list available '
3001 'compression engines'))
3002
3003 # zlib is the historical default and doesn't need an explicit requirement.
3004 elif compengine == 'zstd':
3005 requirements.add('revlog-compression-zstd')
3006 elif compengine != 'zlib':
3007 requirements.add('exp-compression-%s' % compengine)
3008
3009 if scmutil.gdinitconfig(ui):
3010 requirements.add('generaldelta')
3011 if ui.configbool('format', 'sparse-revlog'):
3012 requirements.add(SPARSEREVLOG_REQUIREMENT)
3013 if ui.configbool('experimental', 'treemanifest'):
3014 requirements.add('treemanifest')
3015
3016 revlogv2 = ui.config('experimental', 'revlogv2')
3017 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
3018 requirements.remove('revlogv1')
3019 # generaldelta is implied by revlogv2.
3020 requirements.discard('generaldelta')
3021 requirements.add(REVLOGV2_REQUIREMENT)
3022 # experimental config: format.internal-phase
3023 if ui.configbool('format', 'internal-phase'):
3024 requirements.add('internal-phase')
3025
3026 if createopts.get('narrowfiles'):
3027 requirements.add(repository.NARROW_REQUIREMENT)
3028
3029 if createopts.get('lfs'):
3030 requirements.add('lfs')
3031
3032 if ui.configbool('format', 'bookmarks-in-store'):
3033 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3034
3035 return requirements
3036
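# Editor's hedged sketch: previewing what requirements a brand-new
# repository would get under the current configuration. ui.load(),
# defaultcreateopts() and newreporequirements() are the entry points used
# by repository creation itself; the exact output depends on local config
# (stock defaults yield roughly: dotencode, fncache, generaldelta,
# revlogv1, sparserevlog, store).
from mercurial import ui as uimod, localrepo

u = uimod.ui.load()
opts = localrepo.defaultcreateopts(u)
for req in sorted(localrepo.newreporequirements(u, opts)):
    u.write('%s\n' % req)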
3037 def filterknowncreateopts(ui, createopts):
3038 """Filters a dict of repo creation options against options that are known.
3039
3040 Receives a dict of repo creation options and returns a dict of those
3041 options that we don't know how to handle.
3042
3043 This function is called as part of repository creation. If the
3044 returned dict contains any items, repository creation will not
3045 be allowed, as it means there was a request to create a repository
3046 with options not recognized by loaded code.
3047
3048 Extensions can wrap this function to filter out creation options
3049 they know how to handle.
3050 """
3051 known = {
3052 'backend',
3053 'lfs',
3054 'narrowfiles',
3055 'sharedrepo',
3056 'sharedrelative',
3057 'shareditems',
3058 'shallowfilestore',
3059 }
3060
3061 return {k: v for k, v in createopts.items() if k not in known}
3062
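# Editor's hedged sketch of the wrapping pattern described above: an
# extension claims a custom creation option by removing it from the
# "unknown" dict ('myopt' is hypothetical; extensions.wrapfunction is the
# standard hg extension hook).
from mercurial import extensions, localrepo

def _filtercreateopts(orig, ui, createopts):
    unknown = orig(ui, createopts)
    unknown.pop('myopt', None)    # we know how to handle 'myopt'
    return unknown

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'filterknowncreateopts',
                            _filtercreateopts)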
3063 def createrepository(ui, path, createopts=None):
3064 """Create a new repository in a vfs.
3065
3066 ``path`` path to the new repo's working directory.
3067 ``createopts`` options for the new repository.
3068
3069 The following keys for ``createopts`` are recognized:
3070
3071 backend
3072 The storage backend to use.
3073 lfs
3074 Repository will be created with ``lfs`` requirement. The lfs extension
3075 will automatically be loaded when the repository is accessed.
3076 narrowfiles
3077 Set up repository to support narrow file storage.
3078 sharedrepo
3079 Repository object from which storage should be shared.
3080 sharedrelative
3081 Boolean indicating if the path to the shared repo should be
3082 stored as relative. By default, the pointer to the "parent" repo
3083 is stored as an absolute path.
3084 shareditems
3085 Set of items to share to the new repository (in addition to storage).
3086 shallowfilestore
3087 Indicates that storage for files should be shallow (not all ancestor
3088 revisions are known).
3089 """
3090 createopts = defaultcreateopts(ui, createopts=createopts)
3091
3092 unknownopts = filterknowncreateopts(ui, createopts)
3093
3094 if not isinstance(unknownopts, dict):
3095 raise error.ProgrammingError('filterknowncreateopts() did not return '
3096 'a dict')
3097
3098 if unknownopts:
3099 raise error.Abort(_('unable to create repository because of unknown '
3100 'creation option: %s') %
3101 ', '.join(sorted(unknownopts)),
3102 hint=_('is a required extension not loaded?'))
3103
3104 requirements = newreporequirements(ui, createopts=createopts)
3105
3106 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3107
3108 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3109 if hgvfs.exists():
3110 raise error.RepoError(_('repository %s already exists') % path)
3111
3112 if 'sharedrepo' in createopts:
3113 sharedpath = createopts['sharedrepo'].sharedpath
3114
3115 if createopts.get('sharedrelative'):
3116 try:
3117 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3118 except (IOError, ValueError) as e:
3119 # ValueError is raised on Windows if the drive letters differ
3120 # on each path.
3121 raise error.Abort(_('cannot calculate relative path'),
3122 hint=stringutil.forcebytestr(e))
3123
3124 if not wdirvfs.exists():
3125 wdirvfs.makedirs()
3126
3127 hgvfs.makedir(notindexed=True)
3128 if 'sharedrepo' not in createopts:
3129 hgvfs.mkdir(b'cache')
3130 hgvfs.mkdir(b'wcache')
3131
3132 if b'store' in requirements and 'sharedrepo' not in createopts:
3133 hgvfs.mkdir(b'store')
3134
3135 # We create an invalid changelog outside the store so very old
3136 # Mercurial versions (which didn't know about the requirements
3137 # file) encounter an error on reading the changelog. This
3138 # effectively locks out old clients and prevents them from
3139 # mucking with a repo in an unknown format.
3140 #
3141 # The revlog header has version 2, which won't be recognized by
3142 # such old clients.
3143 hgvfs.append(b'00changelog.i',
3144 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3145 b'layout')
3146
3147 scmutil.writerequires(hgvfs, requirements)
3148
3149 # Write out file telling readers where to find the shared store.
3150 if 'sharedrepo' in createopts:
3151 hgvfs.write(b'sharedpath', sharedpath)
3152
3153 if createopts.get('shareditems'):
3154 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3155 hgvfs.write(b'shared', shared)
3156
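# Editor's usage sketch (hedged; the path is an arbitrary example):
# creating an lfs-enabled repository and then opening it via instance().
from mercurial import ui as uimod, localrepo

u = uimod.ui.load()
localrepo.createrepository(u, '/tmp/newrepo', createopts={'lfs': True})
repo = localrepo.instance(u, '/tmp/newrepo', create=False)
assert 'lfs' in repo.requirements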
3157 def poisonrepository(repo):
3158 """Poison a repository instance so it can no longer be used."""
3159 # Perform any cleanup on the instance.
3160 repo.close()
3161
3162 # Our strategy is to replace the type of the object with one that
3163 # makes all attribute lookups result in an error.
3164 #
3165 # But we have to allow the close() method because some constructors
3166 # of repos call close() on repo references.
3167 class poisonedrepository(object):
3168 def __getattribute__(self, item):
3169 if item == r'close':
3170 return object.__getattribute__(self, item)
3171
3172 raise error.ProgrammingError('repo instances should not be used '
3173 'after unshare')
3174
3175 def close(self):
3176 pass
3177
3178 # We may have a repoview, which intercepts __setattr__. So be sure
3179 # we operate at the lowest level possible.
3180 object.__setattr__(repo, r'__class__', poisonedrepository)
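# Editor's illustration (generic Python, not hg-specific) of the
# class-swap trick used above: replacing __class__ at the object layer
# makes every subsequent attribute access raise.
class _poisoned(object):
    def __getattribute__(self, name):
        raise RuntimeError('instance was poisoned')

class _thing(object):
    pass

t = _thing()
object.__setattr__(t, '__class__', _poisoned)
# any attribute access on t now raises RuntimeError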
@@ -1,2055 +1,2055 b''
1 # manifest.py - manifest revision class for mercurial
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import heapq
11 import itertools
12 import struct
13 import weakref
14
15 from .i18n import _
16 from .node import (
17 bin,
18 hex,
19 nullid,
20 nullrev,
21 )
22 from . import (
23 error,
24 mdiff,
25 policy,
26 pycompat,
27 repository,
28 revlog,
29 util,
30 )
31 from .utils import (
32 interfaceutil,
33 )
34
35 parsers = policy.importmod(r'parsers')
36 propertycache = util.propertycache
37
38 def _parse(data):
39 # This function does a little bit of excessive-looking
40 # precondition checking. This is so that its behavior
41 # exactly matches its C counterpart to try and help
42 # prevent surprise breakage for anyone who develops against
43 # the pure version.
44 if data and data[-1:] != '\n':
45 raise ValueError('Manifest did not end in a newline.')
46 prev = None
47 for l in data.splitlines():
48 if prev is not None and prev > l:
49 raise ValueError('Manifest lines not in sorted order.')
50 prev = l
51 f, n = l.split('\0')
52 if len(n) > 40:
53 yield f, bin(n[:40]), n[40:]
54 else:
55 yield f, bin(n), ''
56
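# Editor's worked example of the manifest text format parsed above: one
# '<path>\0<40-hex-node><optional flag>\n' line per file, sorted by path
# ('l' flags a symlink, 'x' an executable; the node values are dummies).
sample = ('bar/baz\0' + 'a' * 40 + 'l\n'
          + 'foo\0' + 'b' * 40 + '\n')
for f, node, flags in _parse(sample):
    print('%s %d %r' % (f, len(node), flags))
# -> bar/baz 20 'l'
#    foo 20 ''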
57 def _text(it):
57 def _text(it):
58 files = []
58 files = []
59 lines = []
59 lines = []
60 for f, n, fl in it:
60 for f, n, fl in it:
61 files.append(f)
61 files.append(f)
62 # if this is changed to support newlines in filenames,
62 # if this is changed to support newlines in filenames,
63 # be sure to check the templates/ dir again (especially *-raw.tmpl)
63 # be sure to check the templates/ dir again (especially *-raw.tmpl)
64 lines.append("%s\0%s%s\n" % (f, hex(n), fl))
64 lines.append("%s\0%s%s\n" % (f, hex(n), fl))
65
65
66 _checkforbidden(files)
66 _checkforbidden(files)
67 return ''.join(lines)
67 return ''.join(lines)
68
68
class lazymanifestiter(object):
    def __init__(self, lm):
        self.pos = 0
        self.lm = lm

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            self.pos += 1
            return data[0]
        self.pos += 1
        zeropos = data.find('\x00', pos)
        return data[pos:zeropos]

    __next__ = next

class lazymanifestiterentries(object):
    def __init__(self, lm):
        self.lm = lm
        self.pos = 0

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            self.pos += 1
            return data
        zeropos = data.find('\x00', pos)
        hashval = unhexlify(data, self.lm.extrainfo[self.pos],
                            zeropos + 1, 40)
        flags = self.lm._getflags(data, self.pos, zeropos)
        self.pos += 1
        return (data[pos:zeropos], hashval, flags)

    __next__ = next

def unhexlify(data, extra, pos, length):
    s = bin(data[pos:pos + length])
    if extra:
        s += chr(extra & 0xff)
    return s

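# Editor's note: an illustrative sketch, not part of the original module.
# unhexlify() turns the 40 hex digits of a manifest line back into a
# 20-byte node; a nonzero 'extra' byte (tracked per entry in
# _lazymanifest.extrainfo) is appended as a 21st byte so that nodes
# longer than 20 bytes survive the hex round trip. Values are made up.
def _demounhexlify():
    line = 'foo\x00' + 'ab' * 20 + '\n'
    assert unhexlify(line, 0, 4, 40) == bin('ab' * 20)
    assert len(unhexlify(line, 0x01, 4, 40)) == 21
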
def _cmp(a, b):
    return (a > b) - (a < b)

class _lazymanifest(object):
    def __init__(self, data, positions=None, extrainfo=None, extradata=None):
        if positions is None:
            self.positions = self.findlines(data)
            self.extrainfo = [0] * len(self.positions)
            self.data = data
            self.extradata = []
        else:
            self.positions = positions[:]
            self.extrainfo = extrainfo[:]
            self.extradata = extradata[:]
            self.data = data

    def findlines(self, data):
        if not data:
            return []
        pos = data.find("\n")
        if pos == -1 or data[-1:] != '\n':
            raise ValueError("Manifest did not end in a newline.")
        positions = [0]
        prev = data[:data.find('\x00')]
        while pos < len(data) - 1 and pos != -1:
            positions.append(pos + 1)
            nexts = data[pos + 1:data.find('\x00', pos + 1)]
            if nexts < prev:
                raise ValueError("Manifest lines not in sorted order.")
            prev = nexts
            pos = data.find("\n", pos + 1)
        return positions

    def _get(self, index):
        # decode the position stored in self.positions[index]:
        #   a non-negative number is an offset into 'data'
        #   a negative number indexes into 'extradata'
        pos = self.positions[index]
        if pos >= 0:
            return self.data, pos
        return self.extradata[-pos - 1], -1

    def _getkey(self, pos):
        if pos >= 0:
            return self.data[pos:self.data.find('\x00', pos + 1)]
        return self.extradata[-pos - 1][0]

    def bsearch(self, key):
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last)//2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return midpoint
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return -1

    def bsearch2(self, key):
        # same as bsearch, but always returns the position where the key
        # belongs, plus whether it was found; duplicated from bsearch for
        # performance reasons
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last)//2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return (midpoint, True)
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return (first, False)

    def __contains__(self, key):
        return self.bsearch(key) != -1

    def _getflags(self, data, needle, pos):
        start = pos + 41
        end = data.find("\n", start)
        if end == -1:
            end = len(data) - 1
        if start == end:
            return ''
        return self.data[start:end]

    def __getitem__(self, key):
        if not isinstance(key, bytes):
            raise TypeError("getitem: manifest keys must be a byte string.")
        needle = self.bsearch(key)
        if needle == -1:
            raise KeyError
        data, pos = self._get(needle)
        if pos == -1:
            return (data[1], data[2])
        zeropos = data.find('\x00', pos)
        assert 0 <= needle <= len(self.positions)
        assert len(self.extrainfo) == len(self.positions)
        hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
        flags = self._getflags(data, needle, zeropos)
        return (hashval, flags)

    def __delitem__(self, key):
        needle, found = self.bsearch2(key)
        if not found:
            raise KeyError
        cur = self.positions[needle]
        self.positions = self.positions[:needle] + self.positions[needle + 1:]
        self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
        if cur >= 0:
            # mark the deleted line in 'data' by zeroing its first byte
            self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]

    def __setitem__(self, key, value):
        if not isinstance(key, bytes):
            raise TypeError("setitem: manifest keys must be a byte string.")
        if not isinstance(value, tuple) or len(value) != 2:
            raise TypeError("Manifest values must be a tuple of (node, flags).")
        hashval = value[0]
        if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
            raise TypeError("node must be a 20- to 22-byte string")
        flags = value[1]
        if len(hashval) == 22:
            hashval = hashval[:-1]
        if not isinstance(flags, bytes) or len(flags) > 1:
            raise TypeError("flags must be a 0 or 1 byte string, got %r"
                            % flags)
        needle, found = self.bsearch2(key)
        if found:
            # replace the existing entry in place
            pos = self.positions[needle]
            if pos < 0:
                self.extradata[-pos - 1] = (key, hashval, value[1])
            else:
                # the entry lives in 'data'; redirect it to extradata
                self.extradata.append((key, hashval, value[1]))
                self.positions[needle] = -len(self.extradata)
        else:
            # not found, put it in with extra positions
            self.extradata.append((key, hashval, value[1]))
            self.positions = (self.positions[:needle] + [-len(self.extradata)]
                              + self.positions[needle:])
            self.extrainfo = (self.extrainfo[:needle] + [0] +
                              self.extrainfo[needle:])

    def copy(self):
        # XXX call _compact like in C?
        return _lazymanifest(self.data, self.positions, self.extrainfo,
                             self.extradata)

    def _compact(self):
        # hopefully not called TOO often
        if len(self.extradata) == 0:
            return
        l = []
        i = 0
        offset = 0
        self.extrainfo = [0] * len(self.positions)
        while i < len(self.positions):
            if self.positions[i] >= 0:
                cur = self.positions[i]
                last_cut = cur
                while True:
                    self.positions[i] = offset
                    i += 1
                    if i == len(self.positions) or self.positions[i] < 0:
                        break
                    offset += self.positions[i] - cur
                    cur = self.positions[i]
                end_cut = self.data.find('\n', cur)
                if end_cut != -1:
                    end_cut += 1
                offset += end_cut - cur
                l.append(self.data[last_cut:end_cut])
            else:
                while i < len(self.positions) and self.positions[i] < 0:
                    cur = self.positions[i]
                    t = self.extradata[-cur - 1]
                    l.append(self._pack(t))
                    self.positions[i] = offset
                    # a 21st byte, if present, is the extra info for this
                    # entry (see unhexlify()); it sits at index 20
                    if len(t[1]) > 20:
                        self.extrainfo[i] = ord(t[1][20])
                    offset += len(l[-1])
                    i += 1
        self.data = ''.join(l)
        self.extradata = []

    def _pack(self, d):
        return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'

    def text(self):
        self._compact()
        return self.data

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.'''
        # XXX think whether efficiency matters here
        diff = {}

        for fn, e1, flags in self.iterentries():
            if fn not in m2:
                diff[fn] = (e1, flags), (None, '')
            else:
                e2 = m2[fn]
                if (e1, flags) != e2:
                    diff[fn] = (e1, flags), e2
                elif clean:
                    diff[fn] = None

        for fn, e2, flags in m2.iterentries():
            if fn not in self:
                diff[fn] = (None, ''), (e2, flags)

        return diff

    def iterentries(self):
        return lazymanifestiterentries(self)

    def iterkeys(self):
        return lazymanifestiter(self)

    def __iter__(self):
        return lazymanifestiter(self)

    def __len__(self):
        return len(self.positions)

    def filtercopy(self, filterfn):
        # XXX should be optimized
        c = _lazymanifest('')
        for f, n, fl in self.iterentries():
            if filterfn(f):
                c[f] = n, fl
        return c

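# Editor's note: an illustrative usage sketch, not part of the original
# module; it works with either the pure-Python class above or the C
# implementation assigned below. Node values are made up.
def _demolazymanifest():
    lm = _lazymanifest('a\x00' + '0' * 40 + '\n')
    lm['b'] = (bin('1' * 40), 'l')  # new entry, kept in extradata for now
    assert 'b' in lm and lm['b'] == (bin('1' * 40), 'l')
    del lm['a']
    # text() triggers compaction, folding extradata back into 'data'
    assert lm.text() == 'b\x00' + '1' * 40 + 'l\n'
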
try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass

@interfaceutil.implementer(repository.imanifestdict)
class manifestdict(object):
    def __init__(self, data=''):
        self._lm = _lazymanifest(data)

    def __getitem__(self, key):
        return self._lm[key][0]

    def find(self, key):
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __nonzero__(self):
        # nonzero is covered by the __len__ function, but implementing it here
        # makes it easier for extensions to override.
        return len(self._lm) != 0

    __bool__ = __nonzero__

    def __setitem__(self, key, node):
        self._lm[key] = node, self.flags(key, '')

    def __contains__(self, key):
        if key is None:
            return False
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match:
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.filesnotin(m2)
        diff = self.diff(m2)
        files = set(filepath
                    for filepath, hashflags in diff.iteritems()
                    if hashflags[1][0] is None)
        return files

    @propertycache
    def _dirs(self):
        return util.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def _filesfastpath(self, match):
        '''Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files.'''
        files = match.files()
        return (len(files) < 100 and (match.isexact() or
            (match.prefix() and all(fn in self for fn in files))))

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=[''] means "walk the whole tree".
        # follow that here, too
        fset.discard('')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            m = manifestdict()
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict()
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match:
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        return self._lm.diff(m2._lm, clean)

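    # Editor's note: an illustrative sketch, not part of the original
    # module, of the diff() shape documented above (made-up nodes);
    # only differing files appear unless clean=True:
    #
    #     m1 = manifestdict('a\x00' + '1' * 40 + '\nb\x00' + '2' * 40 + '\n')
    #     m2 = manifestdict('a\x00' + '1' * 40 + '\nb\x00' + '3' * 40 + 'x\n')
    #     m1.diff(m2) == {'b': ((bin('2' * 40), ''), (bin('3' * 40), 'x'))}
    #     m1.diff(m2, clean=True)['a'] is None
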
    def setflag(self, key, flag):
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key, default=''):
        try:
            return self._lm[key][1]
        except KeyError:
            return default

    def copy(self):
        c = manifestdict()
        c._lm = self._lm.copy()
        return c

    def items(self):
        return (x[:2] for x in self._lm.iterentries())

    def iteritems(self):
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        return self._lm.iterentries()

    def text(self):
        # most likely uses native version
        return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as a bytearray and a list of changes
        relative to that text, compute a delta that can be used by revlog.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < 1000:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = "%s\0%s%s\n" % (f, hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                            _("failed to remove %s from manifest") % f)
                    l = ""
                if dstart is not None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = bytearray(self.text())
            deltatext = mdiff.textdiff(
                util.buffer(base), util.buffer(arraytext))

        return arraytext, deltatext

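# Editor's note: an illustrative sketch, not part of the original module,
# of driving fastdelta(). Here the manifest 'm' is the *new* state,
# 'base' is the old text, and 'changes' lists (filename, todelete) pairs;
# the paths and node values are made up.
def _demofastdelta():
    base = bytearray('a\x00' + '1' * 40 + '\n')
    m = manifestdict('a\x00' + '1' * 40 + '\nb\x00' + '2' * 40 + '\n')
    arraytext, deltatext = m.fastdelta(base, [('b', False)])
    # arraytext is the new full text; deltatext is the chunked binary
    # patch (see _addlistdelta below) suitable for revlog storage
    assert bytes(arraytext) == m.text()
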
def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found, m[start:end] is the line containing
    that string. If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer, a memoryview or a byte string.
    s is a byte string'''
    def advance(i, c):
        while i < lenm and m[i:i + 1] != c:
            i += 1
        return i
    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        while start > 0 and m[start - 1:start] != '\n':
            start -= 1
        end = advance(start, '\0')
        if bytes(m[start:end]) < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, '\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    end = advance(lo, '\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, '\n')
        return (lo, end + 1)
    else:
        return (lo, lo)

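# Editor's note: an illustrative sketch, not part of the original module,
# of _msearch() semantics on a two-line manifest text (made-up values).
def _demomsearch():
    text = 'a\x00' + '1' * 40 + '\nc\x00' + '2' * 40 + '\n'
    start, end = _msearch(text, 'a')
    assert text[start:end] == 'a\x00' + '1' * 40 + '\n'  # found: whole line
    start, end = _msearch(text, 'b')
    assert start == end == 43  # not found: sorted insertion point
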
def _checkforbidden(l):
    """Check filenames for illegal characters."""
    for f in l:
        if '\n' in f or '\r' in f:
            raise error.StorageError(
                _("'\\n' and '\\r' disallowed in filenames: %r")
                % pycompat.bytestr(f))


# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    currentposition = 0
    newaddlist = bytearray()

    for start, end, content in x:
        newaddlist += addlist[currentposition:start]
        if content:
            newaddlist += bytearray(content)

        currentposition = end

    newaddlist += addlist[currentposition:]

    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, newaddlist

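# Editor's note: an illustrative sketch, not part of the original module,
# of the delta encoding built above: each chunk is a ">lll" header
# (start offset, end offset, payload length) followed by the payload.
def _demodeltadecode(deltatext):
    chunks = []
    pos = 0
    while pos < len(deltatext):
        start, end, length = struct.unpack(">lll", deltatext[pos:pos + 12])
        chunks.append((start, end, deltatext[pos + 12:pos + 12 + length]))
        pos += 12 + length
    return chunks  # [(start, end, replacement-bytes), ...]
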
def _splittopdir(f):
    if '/' in f:
        dir, subpath = f.split('/', 1)
        return dir + '/', subpath
    else:
        return '', f

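# Editor's note: a quick illustration, not part of the original module,
# of how treemanifest below splits a path into a top-level directory
# (with trailing '/') and the remainder.
def _demosplittopdir():
    assert _splittopdir('a/b/c') == ('a/', 'b/c')
    assert _splittopdir('top') == ('', 'top')
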
_noop = lambda s: None

class treemanifest(object):
    def __init__(self, dir='', text=''):
        self._dir = dir
        self._node = nullid
        self._loadfunc = _noop
        self._copyfunc = _noop
        self._dirty = False
        self._dirs = {}
        self._lazydirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:
            def readsubtree(subdir, subm):
                raise AssertionError('treemanifest constructor only accepts '
                                     'flat manifests')
            self.parse(text, readsubtree)
            self._dirty = True  # Mark flat manifest dirty after parsing

    def _subpath(self, path):
        return self._dir + path

    def _loadalllazy(self):
        selfdirs = self._dirs
        for d, (path, node, readsubtree, docopy) in self._lazydirs.iteritems():
            if docopy:
                selfdirs[d] = readsubtree(path, node).copy()
            else:
                selfdirs[d] = readsubtree(path, node)
        self._lazydirs = {}

    def _loadlazy(self, d):
        v = self._lazydirs.get(d)
        if v:
            path, node, readsubtree, docopy = v
            if docopy:
                self._dirs[d] = readsubtree(path, node).copy()
            else:
                self._dirs[d] = readsubtree(path, node)
            del self._lazydirs[d]

    def _loadchildrensetlazy(self, visit):
        if not visit:
            return None
        if visit == 'all' or visit == 'this':
            self._loadalllazy()
            return None

        loadlazy = self._loadlazy
        for k in visit:
            loadlazy(k + '/')
        return visit

    def _loaddifflazy(self, t1, t2):
        """load items in t1 and t2 if they're needed for diffing.

        The current criteria are:
        - if it's not present in _lazydirs in either t1 or t2, load it in the
          other (it may already be loaded or it may not exist, doesn't matter)
        - if it's present in _lazydirs in both, compare the nodeid; if it
          differs, load it in both
        """
        toloadlazy = []
        for d, v1 in t1._lazydirs.iteritems():
            v2 = t2._lazydirs.get(d)
            if not v2 or v2[1] != v1[1]:
                toloadlazy.append(d)
        for d, v1 in t2._lazydirs.iteritems():
            if d not in t1._lazydirs:
                toloadlazy.append(d)

        for d in toloadlazy:
            t1._loadlazy(d)
            t2._loadlazy(d)

    def __len__(self):
        self._load()
        size = len(self._files)
        self._loadalllazy()
        for m in self._dirs.values():
            size += m.__len__()
        return size

    def __nonzero__(self):
        # Faster than "__len__() != 0" since it avoids loading sub-manifests
        return not self._isempty()

    __bool__ = __nonzero__

    def _isempty(self):
        self._load()  # for consistency; already loaded by all callers
        # See if we can skip loading everything.
        if self._files or (self._dirs and
                           any(not m._isempty() for m in self._dirs.values())):
            return False
        self._loadalllazy()
        return (not self._dirs or
                all(m._isempty() for m in self._dirs.values()))

    def __repr__(self):
        return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
                (self._dir, hex(self._node),
                 bool(self._loadfunc is _noop),
                 self._dirty, id(self)))

    def dir(self):
        '''The directory that this tree manifest represents, including a
        trailing '/'. Empty string for the repo root directory.'''
        return self._dir

    def node(self):
        '''The node of this instance. nullid for unsaved instances. Should
        be updated when the instance is read from or written to a revlog.
        '''
        assert not self._dirty
        return self._node

    def setnode(self, node):
        self._node = node
        self._dirty = False

    def iterentries(self):
        self._load()
        self._loadalllazy()
        for p, n in sorted(itertools.chain(self._dirs.items(),
                                           self._files.items())):
            if p in self._files:
                yield self._subpath(p), n, self._flags.get(p, '')
            else:
                for x in n.iterentries():
                    yield x

    def items(self):
        self._load()
        self._loadalllazy()
        for p, n in sorted(itertools.chain(self._dirs.items(),
                                           self._files.items())):
            if p in self._files:
                yield self._subpath(p), n
            else:
                for f, sn in n.iteritems():
                    yield f, sn

    iteritems = items

    def iterkeys(self):
        self._load()
        self._loadalllazy()
        for p in sorted(itertools.chain(self._dirs, self._files)):
            if p in self._files:
                yield self._subpath(p)
            else:
                for f in self._dirs[p]:
                    yield f

    def keys(self):
        return list(self.iterkeys())

    def __iter__(self):
        return self.iterkeys()

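    # Editor's note: an illustrative sketch, not part of the original
    # module. Assigning to a slash-separated path transparently creates
    # one nested treemanifest per directory level (node value made up):
    #
    #     tm = treemanifest()
    #     tm['a/b/f'] = bin('1' * 40)  # creates subtrees 'a/' and 'a/b/'
    #     tm.setflag('a/b/f', 'x')
    #     assert 'a/b/f' in tm and tm.hasdir('a/b')
    #     assert list(tm) == ['a/b/f']  # iteration yields full paths
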
    def __contains__(self, f):
        if f is None:
            return False
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return False

            return self._dirs[dir].__contains__(subpath)
        else:
            return f in self._files

    def get(self, f, default=None):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return default
            return self._dirs[dir].get(subpath, default)
        else:
            return self._files.get(f, default)

    def __getitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            return self._dirs[dir].__getitem__(subpath)
        else:
            return self._files[f]

    def flags(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return ''
            return self._dirs[dir].flags(subpath)
        else:
            if f in self._lazydirs or f in self._dirs:
                return ''
            return self._flags.get(f, '')

    def find(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            return self._dirs[dir].find(subpath)
        else:
            return self._files[f], self._flags.get(f, '')

    def __delitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            self._dirs[dir].__delitem__(subpath)
            # If the directory is now empty, remove it
            if self._dirs[dir]._isempty():
                del self._dirs[dir]
        else:
            del self._files[f]
            if f in self._flags:
                del self._flags[f]
        self._dirty = True

    def __setitem__(self, f, n):
        assert n is not None
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].__setitem__(subpath, n)
        else:
            self._files[f] = n[:21]  # to match manifestdict's behavior
        self._dirty = True

    def _load(self):
        if self._loadfunc is not _noop:
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)

    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].setflag(subpath, flags)
        else:
            self._flags[f] = flags
        self._dirty = True

    def copy(self):
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:
            def _copyfunc(s):
                self._load()
                s._lazydirs = {d: (p, n, r, True) for
                               d, (p, n, r, c) in self._lazydirs.iteritems()}
                sdirs = s._dirs
                for d, v in self._dirs.iteritems():
                    sdirs[d] = v.copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)
            if self._loadfunc is _noop:
                _copyfunc(copy)
            else:
                copy._copyfunc = _copyfunc
        else:
            copy._copyfunc = self._copyfunc
        return copy

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match and not match.always():
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.filesnotin(m2)

        files = set()
        def _filesnotin(t1, t2):
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)
            for d, m1 in t1._dirs.iteritems():
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    files.update(m1.iterkeys())

            for fn in t1._files:
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files

    @propertycache
    def _alldirs(self):
        return util.dirs(self)

    def dirs(self):
        return self._alldirs

    def hasdir(self, dir):
        self._load()
        topdir, subdir = _splittopdir(dir)
        if topdir:
            self._loadlazy(topdir)
            if topdir in self._dirs:
                return self._dirs[topdir].hasdir(subdir)
            return False
        dirslash = dir + '/'
        return dirslash in self._dirs or dirslash in self._lazydirs

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=[''] means "walk the whole tree".
        # follow that here, too
        fset.discard('')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        visit = match.visitchildrenset(self._dir[:-1])
        if not visit:
            return

        # yield this dir's files and walk its submanifests
        self._load()
        visit = self._loadchildrensetlazy(visit)
        for p in sorted(list(self._dirs) + list(self._files)):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                if not visit or p[:-1] in visit:
                    for f in self._dirs[p]._walk(match):
                        yield f

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        return self._matches(match)

    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''

        visit = match.visitchildrenset(self._dir[:-1])
        if visit == 'all':
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            return ret

        self._load()
        for fn in self._files:
            # While visitchildrenset *usually* lists only subdirs, this is
            # actually up to the matcher and may have some files in the set().
            # If visit == 'this', we should obviously look at the files in this
            # directory; if visit is a set, and fn is in it, we should inspect
            # fn (but no need to inspect things not in the set).
            if visit != 'this' and fn not in visit:
                continue
            fullp = self._subpath(fn)
            # visitchildrenset isn't perfect, we still need to call the regular
            # matcher code to further filter results.
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        visit = self._loadchildrensetlazy(visit)
        for dir, subm in self._dirs.iteritems():
            if visit and dir[:-1] not in visit:
                continue
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret

1116 def diff(self, m2, match=None, clean=False):
1116 def diff(self, m2, match=None, clean=False):
1117 '''Finds changes between the current manifest and m2.
1117 '''Finds changes between the current manifest and m2.
1118
1118
1119 Args:
1119 Args:
1120 m2: the manifest to which this manifest should be compared.
1120 m2: the manifest to which this manifest should be compared.
1121 clean: if true, include files unchanged between these manifests
1121 clean: if true, include files unchanged between these manifests
1122 with a None value in the returned dictionary.
1122 with a None value in the returned dictionary.
1123
1123
1124 The result is returned as a dict with filename as key and
1124 The result is returned as a dict with filename as key and
1125 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
1125 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
1126 nodeid in the current/other manifest and fl1/fl2 is the flag
1126 nodeid in the current/other manifest and fl1/fl2 is the flag
1127 in the current/other manifest. Where the file does not exist,
1127 in the current/other manifest. Where the file does not exist,
1128 the nodeid will be None and the flags will be the empty
1128 the nodeid will be None and the flags will be the empty
1129 string.
1129 string.
1130 '''
1130 '''
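        # Illustration (hypothetical nodeids n1/n2, not from the original
        # source): a file whose flag became executable in m2 would appear as
        #     {'foo/bar.py': ((n1, ''), (n2, 'x'))}
        # and a file deleted in m2 as
        #     {'foo/bar.py': ((n1, ''), (None, ''))}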
        if match and not match.always():
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        result = {}
        emptytree = treemanifest()

        def _iterativediff(t1, t2, stack):
            """compares two tree manifests and appends new tree manifests
            which need to be compared to the stack"""
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)

            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                stack.append((m1, m2))

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    stack.append((emptytree, m2))

            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        stackls = []
        _iterativediff(self, m2, stackls)
        while stackls:
            t1, t2 = stackls.pop()
            # stackls is populated in the function call
            _iterativediff(t1, t2, stackls)
        return result

    def unmodifiedsince(self, m2):
        return not self._dirty and not m2._dirty and self._node == m2._node

    def parse(self, text, readsubtree):
        selflazy = self._lazydirs
        subpath = self._subpath
        for f, n, fl in _parse(text):
            if fl == 't':
                f = f + '/'
                # False below means "doesn't need to be copied" and can use the
                # cached value from readsubtree directly.
                selflazy[f] = (subpath(f), n, readsubtree, False)
            elif '/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl
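    # _parse() consumes the manifest text format: one entry per line, of the
    # form '<path>\0<hex nodeid><flags>\n', where flags is '' (regular file),
    # 'l' (symlink), 'x' (executable) or 't' (a subtree entry).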

    def text(self):
        """Get the full data of this manifest as a bytestring."""
        self._load()
        return _text(self.iterentries())

    def dirtext(self):
        """Get the full data of this directory as a bytestring. Make sure that
        any submanifests have been written first, so their nodeids are correct.
        """
        self._load()
        flags = self.flags
        lazydirs = [(d[:-1], v[1], 't') for d, v in self._lazydirs.iteritems()]
        dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
        files = [(f, self._files[f], flags(f)) for f in self._files]
        return _text(sorted(dirs + files + lazydirs))

    def read(self, gettext, readsubtree):
        def _load_for_read(s):
            s.parse(gettext(), readsubtree)
            s._dirty = False
        self._loadfunc = _load_for_read

    def writesubtrees(self, m1, m2, writesubtree, match):
        self._load() # for consistency; should never have any effect here
        m1._load()
        m2._load()
        emptytree = treemanifest()
        def getnode(m, d):
            ld = m._lazydirs.get(d)
            if ld:
                return ld[1]
            return m._dirs.get(d, emptytree)._node

        # let's skip investigating things that `match` says we do not need.
        visit = match.visitchildrenset(self._dir[:-1])
        visit = self._loadchildrensetlazy(visit)
        if visit == 'this' or visit == 'all':
            visit = None
        for d, subm in self._dirs.iteritems():
            if visit and d[:-1] not in visit:
                continue
            subp1 = getnode(m1, d)
            subp2 = getnode(m2, d)
            if subp1 == nullid:
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2, match)

    def walksubtrees(self, matcher=None):
        """Returns an iterator of the subtrees of this manifest, including this
        manifest itself.

        If `matcher` is provided, it only returns subtrees that match.
        """
        if matcher and not matcher.visitdir(self._dir[:-1]):
            return
        if not matcher or matcher(self._dir[:-1]):
            yield self

        self._load()
        # OPT: use visitchildrenset to avoid loading everything.
        self._loadalllazy()
        for d, subm in self._dirs.iteritems():
            for subtree in subm.walksubtrees(matcher=matcher):
                yield subtree

class manifestfulltextcache(util.lrucachedict):
    """File-backed LRU cache for the manifest cache

    File consists of entries, up to EOF:

    - 20 bytes node, 4 bytes length, <length> manifest data

    These are written in reverse cache order (oldest to newest).

    """

    _file = 'manifestfulltextcache'

    def __init__(self, max):
        super(manifestfulltextcache, self).__init__(max)
        self._dirty = False
        self._read = False
        self._opener = None

    def read(self):
        if self._read or self._opener is None:
            return

        try:
            with self._opener(self._file) as fp:
                set = super(manifestfulltextcache, self).__setitem__
                # ignore trailing data, this is a cache, corruption is skipped
                while True:
                    node = fp.read(20)
                    if len(node) < 20:
                        break
                    try:
                        size = struct.unpack('>L', fp.read(4))[0]
                    except struct.error:
                        break
                    value = bytearray(fp.read(size))
                    if len(value) != size:
                        break
                    set(node, value)
        except IOError:
            # the file is allowed to be missing
            pass

        self._read = True
        self._dirty = False

    def write(self):
        if not self._dirty or self._opener is None:
            return
        # rotate backwards to the first used node
        with self._opener(self._file, 'w', atomictemp=True, checkambig=True
                          ) as fp:
            node = self._head.prev
            while True:
                if node.key in self._cache:
                    fp.write(node.key)
                    fp.write(struct.pack('>L', len(node.value)))
                    fp.write(node.value)
                if node is self._head:
                    break
                node = node.prev

    def __len__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__len__()

    def __contains__(self, k):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__contains__(k)

    def __iter__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__iter__()

    def __getitem__(self, k):
        if not self._read:
            self.read()
        # the cache lru order can change on read
        setdirty = self._cache.get(k) is not self._head
        value = super(manifestfulltextcache, self).__getitem__(k)
        if setdirty:
            self._dirty = True
        return value

    def __setitem__(self, k, v):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__setitem__(k, v)
        self._dirty = True

    def __delitem__(self, k):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__delitem__(k)
        self._dirty = True

    def get(self, k, default=None):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).get(k, default=default)

    def clear(self, clear_persisted_data=False):
        super(manifestfulltextcache, self).clear()
        if clear_persisted_data:
            self._dirty = True
            self.write()
        self._read = False

@interfaceutil.implementer(repository.imanifeststorage)
class manifestrevlog(object):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, tree='', dirlogcache=None, indexfile=None,
                 treemanifest=False):
        """Constructs a new manifest revlog

        `indexfile` - used by extensions to have two manifests at once, like
        when transitioning between flat manifests and treemanifests.

        `treemanifest` - used to indicate this is a tree manifest revlog. Opener
        options can also be used to make this a tree manifest revlog. The opener
        option takes precedence, so if it is set to True, we ignore whatever
        value is passed in to the constructor.
        """
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        optiontreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            optiontreemanifest = opts.get('treemanifest', False)

        self._treeondisk = optiontreemanifest or treemanifest

        self._fulltextcache = manifestfulltextcache(cachesize)

        if tree:
            assert self._treeondisk, 'opts is %r' % opts

        if indexfile is None:
            indexfile = '00manifest.i'
            if tree:
                indexfile = "meta/" + tree + indexfile

        self.tree = tree

        # The dirlogcache is kept on the root manifest log
        if tree:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

        self._revlog = revlog.revlog(opener, indexfile,
                                     # only root indexfile is cached
                                     checkambig=not bool(tree),
                                     mmaplargeindex=True)

        self.index = self._revlog.index
        self.version = self._revlog.version
        self._generaldelta = self._revlog._generaldelta

    def _setupmanifestcachehooks(self, repo):
        """Persist the manifestfulltextcache on lock release"""
        if not util.safehasattr(repo, '_wlockref'):
            return

        self._fulltextcache._opener = repo.wcachevfs
        if repo._currentlock(repo._wlockref) is None:
            return

        reporef = weakref.ref(repo)
        manifestrevlogref = weakref.ref(self)

        def persistmanifestcache():
            repo = reporef()
            self = manifestrevlogref()
            if repo is None or self is None:
                return
            if repo.manifestlog.getstorage(b'') is not self:
                # there's a different manifest in play now, abort
                return
            self._fulltextcache.write()

        repo._afterlock(persistmanifestcache)

    @property
    def fulltextcache(self):
        return self._fulltextcache

    def clearcaches(self, clear_persisted_data=False):
        self._revlog.clearcaches()
        self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
        self._dirlogcache = {self.tree: self}

    def dirlog(self, d):
        if d:
            assert self._treeondisk
        if d not in self._dirlogcache:
            mfrevlog = manifestrevlog(self.opener, d,
                                      self._dirlogcache,
                                      treemanifest=self._treeondisk)
            self._dirlogcache[d] = mfrevlog
        return self._dirlogcache[d]

    def add(self, m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
-            work = heapq.merge([(x, False) for x in added],
-                               [(x, True) for x in removed])
+            work = heapq.merge([(x, False) for x in sorted(added)],
+                               [(x, True) for x in sorted(removed)])
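            # heapq.merge() only yields a globally ordered stream when each
            # input iterable is itself sorted, and fastdelta() below relies
            # on walking `work` in manifest order, hence the sorted() calls.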

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self._revlog.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self._revlog.addrevision(text, transaction, link, p1, p2,
                                         cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                assert readtree, "readtree must be set for treemanifest writes"
                assert match, "match must be specified for treemanifest writes"
                m1 = readtree(self.tree, p1)
                m2 = readtree(self.tree, p2)
                n = self._addtree(m, transaction, link, m1, m2, readtree,
                                  match=match)
                arraytext = None
            else:
                text = m.text()
                n = self._revlog.addrevision(text, transaction, link, p1, p2)
                arraytext = bytearray(text)

        if arraytext is not None:
            self.fulltextcache[n] = arraytext

        return n

    def _addtree(self, m, transaction, link, m1, m2, readtree, match):
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if self.tree != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(
            m2)):
            return m.node()
        def writesubtree(subm, subp1, subp2, match):
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None,
                       readtree=readtree, match=match)
        m.writesubtrees(m1, m2, writesubtree, match)
        text = m.dirtext()
        n = None
        if self.tree != '':
            # Double-check whether contents are unchanged to one parent
            if text == m1.dirtext():
                n = m1.node()
            elif text == m2.dirtext():
                n = m2.node()

        if not n:
            n = self._revlog.addrevision(text, transaction, link, m1.node(),
                                         m2.node())

        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, value):
        return self._revlog.lookup(value)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def parents(self, node):
        return self._revlog.parents(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def checksize(self):
        return self._revlog.checksize()

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def cmp(self, node, text):
        return self._revlog.cmp(node, text)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def emitrevisions(self, nodes, nodesorder=None,
                      revisiondata=False, assumehaveparentrevisions=False,
                      deltamode=repository.CG_DELTAMODE_STD):
        return self._revlog.emitrevisions(
            nodes, nodesorder=nodesorder, revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def rawsize(self, rev):
        return self._revlog.rawsize(rev)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def files(self):
        return self._revlog.files()

    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, manifestrevlog):
            raise error.ProgrammingError('expected manifestrevlog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        return self._revlog.storageinfo(
            exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
            revisionscount=revisionscount, trackedsize=trackedsize,
            storedsize=storedsize)

    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    @property
    def opener(self):
        return self._revlog.opener

    @opener.setter
    def opener(self, value):
        self._revlog.opener = value
1641 @interfaceutil.implementer(repository.imanifestlog)
1641 @interfaceutil.implementer(repository.imanifestlog)
1642 class manifestlog(object):
1642 class manifestlog(object):
1643 """A collection class representing the collection of manifest snapshots
1643 """A collection class representing the collection of manifest snapshots
1644 referenced by commits in the repository.
1644 referenced by commits in the repository.
1645
1645
1646 In this situation, 'manifest' refers to the abstract concept of a snapshot
1646 In this situation, 'manifest' refers to the abstract concept of a snapshot
1647 of the list of files in the given commit. Consumers of the output of this
1647 of the list of files in the given commit. Consumers of the output of this
1648 class do not care about the implementation details of the actual manifests
1648 class do not care about the implementation details of the actual manifests
1649 they receive (i.e. tree or flat or lazily loaded, etc)."""
1649 they receive (i.e. tree or flat or lazily loaded, etc)."""
1650 def __init__(self, opener, repo, rootstore, narrowmatch):
1650 def __init__(self, opener, repo, rootstore, narrowmatch):
1651 usetreemanifest = False
1651 usetreemanifest = False
1652 cachesize = 4
1652 cachesize = 4
1653
1653
1654 opts = getattr(opener, 'options', None)
1654 opts = getattr(opener, 'options', None)
1655 if opts is not None:
1655 if opts is not None:
1656 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1656 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1657 cachesize = opts.get('manifestcachesize', cachesize)
1657 cachesize = opts.get('manifestcachesize', cachesize)
1658
1658
1659 self._treemanifests = usetreemanifest
1659 self._treemanifests = usetreemanifest
1660
1660
1661 self._rootstore = rootstore
1661 self._rootstore = rootstore
1662 self._rootstore._setupmanifestcachehooks(repo)
1662 self._rootstore._setupmanifestcachehooks(repo)
1663 self._narrowmatch = narrowmatch
1663 self._narrowmatch = narrowmatch
1664
1664
1665 # A cache of the manifestctx or treemanifestctx for each directory
1665 # A cache of the manifestctx or treemanifestctx for each directory
1666 self._dirmancache = {}
1666 self._dirmancache = {}
1667 self._dirmancache[''] = util.lrucachedict(cachesize)
1667 self._dirmancache[''] = util.lrucachedict(cachesize)
1668
1668
1669 self._cachesize = cachesize
1669 self._cachesize = cachesize
1670
1670
1671 def __getitem__(self, node):
1671 def __getitem__(self, node):
1672 """Retrieves the manifest instance for the given node. Throws a
1672 """Retrieves the manifest instance for the given node. Throws a
1673 LookupError if not found.
1673 LookupError if not found.
1674 """
1674 """
1675 return self.get('', node)
1675 return self.get('', node)
1676
1676
1677 def get(self, tree, node, verify=True):
1677 def get(self, tree, node, verify=True):
1678 """Retrieves the manifest instance for the given node. Throws a
1678 """Retrieves the manifest instance for the given node. Throws a
1679 LookupError if not found.
1679 LookupError if not found.
1680
1680
1681 `verify` - if True an exception will be thrown if the node is not in
1681 `verify` - if True an exception will be thrown if the node is not in
1682 the revlog
1682 the revlog
1683 """
1683 """
1684 if node in self._dirmancache.get(tree, ()):
1684 if node in self._dirmancache.get(tree, ()):
1685 return self._dirmancache[tree][node]
1685 return self._dirmancache[tree][node]
1686
1686
1687 if not self._narrowmatch.always():
1687 if not self._narrowmatch.always():
1688 if not self._narrowmatch.visitdir(tree[:-1]):
1688 if not self._narrowmatch.visitdir(tree[:-1]):
1689 return excludeddirmanifestctx(tree, node)
1689 return excludeddirmanifestctx(tree, node)
1690 if tree:
1690 if tree:
1691 if self._rootstore._treeondisk:
1691 if self._rootstore._treeondisk:
1692 if verify:
1692 if verify:
1693 # Side-effect is LookupError is raised if node doesn't
1693 # Side-effect is LookupError is raised if node doesn't
1694 # exist.
1694 # exist.
1695 self.getstorage(tree).rev(node)
1695 self.getstorage(tree).rev(node)
1696
1696
1697 m = treemanifestctx(self, tree, node)
1697 m = treemanifestctx(self, tree, node)
1698 else:
1698 else:
1699 raise error.Abort(
1699 raise error.Abort(
1700 _("cannot ask for manifest directory '%s' in a flat "
1700 _("cannot ask for manifest directory '%s' in a flat "
1701 "manifest") % tree)
1701 "manifest") % tree)
1702 else:
1702 else:
1703 if verify:
1703 if verify:
1704 # Side-effect is LookupError is raised if node doesn't exist.
1704 # Side-effect is LookupError is raised if node doesn't exist.
1705 self._rootstore.rev(node)
1705 self._rootstore.rev(node)
1706
1706
1707 if self._treemanifests:
1707 if self._treemanifests:
1708 m = treemanifestctx(self, '', node)
1708 m = treemanifestctx(self, '', node)
1709 else:
1709 else:
1710 m = manifestctx(self, node)
1710 m = manifestctx(self, node)
1711
1711
1712 if node != nullid:
1712 if node != nullid:
1713 mancache = self._dirmancache.get(tree)
1713 mancache = self._dirmancache.get(tree)
1714 if not mancache:
1714 if not mancache:
1715 mancache = util.lrucachedict(self._cachesize)
1715 mancache = util.lrucachedict(self._cachesize)
1716 self._dirmancache[tree] = mancache
1716 self._dirmancache[tree] = mancache
1717 mancache[node] = m
1717 mancache[node] = m
1718 return m
1718 return m
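    # get() thus layers two caches: the per-directory _dirmancache of ctx
    # objects handled here, plus the storage layer's fulltextcache of raw
    # manifest texts underneath it.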

    def getstorage(self, tree):
        return self._rootstore.dirlog(tree)

    def clearcaches(self, clear_persisted_data=False):
        self._dirmancache.clear()
        self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)

    def rev(self, node):
        return self._rootstore.rev(node)

@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memmanifestctx(object):
    def __init__(self, manifestlog):
        self._manifestlog = manifestlog
        self._manifestdict = manifestdict()

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def new(self):
        return memmanifestctx(self._manifestlog)

    def copy(self):
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    def read(self):
        return self._manifestdict

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        return self._storage().add(self._manifestdict, transaction, link,
                                   p1, p2, added, removed, match=match)

@interfaceutil.implementer(repository.imanifestrevisionstored)
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """
    def __init__(self, manifestlog, node):
        self._manifestlog = manifestlog
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        #self.p1, self.p2 = store.parents(node)
        #rev = store.rev(node)
        #self.linkrev = store.linkrev(rev)

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def node(self):
        return self._node

    def new(self):
        return memmanifestctx(self._manifestlog)

    def copy(self):
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        return self._storage().parents(self._node)

    def read(self):
        if self._data is None:
            if self._node == nullid:
                self._data = manifestdict()
            else:
                store = self._storage()
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        '''
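        # deltaparent(r) is the revision the stored delta is based on; when
        # that is one of r's parents, applying the stored delta is cheaper
        # than reconstructing the full text, so readdelta() is preferred.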
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
            return self.readdelta()
        return self.read()

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        '''
        store = self._storage()
        r = store.rev(self._node)
        d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
        return manifestdict(d)

    def find(self, key):
        return self.read().find(key)

@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memtreemanifestctx(object):
    def __init__(self, manifestlog, dir=''):
        self._manifestlog = manifestlog
        self._dir = dir
        self._treemanifest = treemanifest()

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def new(self, dir=''):
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self._treemanifest.copy()
        return memmf

    def read(self):
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        def readtree(dir, node):
            return self._manifestlog.get(dir, node).read()
        return self._storage().add(self._treemanifest, transaction, link,
                                   p1, p2, added, removed, readtree=readtree,
                                   match=match)

@interfaceutil.implementer(repository.imanifestrevisionstored)
class treemanifestctx(object):
    def __init__(self, manifestlog, dir, node):
        self._manifestlog = manifestlog
        self._dir = dir
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
        # we can instantiate treemanifestctx objects for directories we don't
        # have on disk.
        #self.p1, self.p2 = store.parents(node)
        #rev = store.rev(node)
        #self.linkrev = store.linkrev(rev)

    def _storage(self):
        narrowmatch = self._manifestlog._narrowmatch
        if not narrowmatch.always():
            if not narrowmatch.visitdir(self._dir[:-1]):
                return excludedmanifestrevlog(self._dir)
        return self._manifestlog.getstorage(self._dir)

    def read(self):
        if self._data is None:
            store = self._storage()
            if self._node == nullid:
                self._data = treemanifest()
            # TODO accessing non-public API
            elif store._treeondisk:
                m = treemanifest(dir=self._dir)
                def gettext():
                    return store.revision(self._node)
                def readsubtree(dir, subm):
                    # Set verify to False since we need to be able to create
                    # subtrees for trees that don't exist on disk.
                    return self._manifestlog.get(dir, subm, verify=False).read()
                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = treemanifest(dir=self._dir, text=text)

        return self._data

    def node(self):
        return self._node

    def new(self, dir=''):
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        return self._storage().parents(self._node)

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.
        '''
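        # Illustration (hypothetical path): with shallow=True, a changed
        # subdirectory surfaces as a single 't'-flagged entry such as 'lib/'
        # rather than as the individual files inside it.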
1939 store = self._storage()
1939 store = self._storage()
1940 if shallow:
1940 if shallow:
1941 r = store.rev(self._node)
1941 r = store.rev(self._node)
1942 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
1942 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
1943 return manifestdict(d)
1943 return manifestdict(d)
1944 else:
1944 else:
1945 # Need to perform a slow delta
1945 # Need to perform a slow delta
1946 r0 = store.deltaparent(store.rev(self._node))
1946 r0 = store.deltaparent(store.rev(self._node))
1947 m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
1947 m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
1948 m1 = self.read()
1948 m1 = self.read()
1949 md = treemanifest(dir=self._dir)
1949 md = treemanifest(dir=self._dir)
1950 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1950 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1951 if n1:
1951 if n1:
1952 md[f] = n1
1952 md[f] = n1
1953 if fl1:
1953 if fl1:
1954 md.setflag(f, fl1)
1954 md.setflag(f, fl1)
1955 return md
1955 return md
1956
1956
1957 def readfast(self, shallow=False):
1957 def readfast(self, shallow=False):
1958 '''Calls either readdelta or read, based on which would be less work.
1958 '''Calls either readdelta or read, based on which would be less work.
1959 readdelta is called if the delta is against the p1, and therefore can be
1959 readdelta is called if the delta is against the p1, and therefore can be
1960 read quickly.
1960 read quickly.
1961
1961
1962 If `shallow` is True, it only returns the entries from this manifest,
1962 If `shallow` is True, it only returns the entries from this manifest,
1963 and not any submanifests.
1963 and not any submanifests.
1964 '''
1964 '''
1965 store = self._storage()
1965 store = self._storage()
1966 r = store.rev(self._node)
1966 r = store.rev(self._node)
1967 deltaparent = store.deltaparent(r)
1967 deltaparent = store.deltaparent(r)
1968 if (deltaparent != nullrev and
1968 if (deltaparent != nullrev and
1969 deltaparent in store.parentrevs(r)):
1969 deltaparent in store.parentrevs(r)):
1970 return self.readdelta(shallow=shallow)
1970 return self.readdelta(shallow=shallow)
1971
1971
1972 if shallow:
1972 if shallow:
1973 return manifestdict(store.revision(self._node))
1973 return manifestdict(store.revision(self._node))
1974 else:
1974 else:
1975 return self.read()
1975 return self.read()
1976
1976
1977 def find(self, key):
1977 def find(self, key):
1978 return self.read().find(key)
1978 return self.read().find(key)
1979
1979
class excludeddir(treemanifest):
    """Stand-in for a directory that is excluded from the repository.

    With narrowing active on a repository that uses treemanifests,
    some of the directory revlogs will be excluded from the resulting
    clone. This is a huge storage win for clients, but means we need
    some sort of pseudo-manifest to surface to internals so we can
    detect a merge conflict outside the narrowspec. That's what this
    class is: it stands in for a directory whose node is known, but
    whose contents are unknown.
    """
    def __init__(self, dir, node):
        super(excludeddir, self).__init__(dir)
        self._node = node
        # Add an empty file, which will be included by iterators and such,
        # appearing as the directory itself (i.e. something like "dir/")
        self._files[''] = node
        self._flags[''] = 't'

    # Manifests outside the narrowspec should never be modified, so avoid
    # copying. This makes a noticeable difference when there are very many
    # directories outside the narrowspec. Also, it makes sense for the copy
    # to be of the same type as the original, which would not happen with
    # the super type's copy().
    def copy(self):
        return self

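# A hedged sketch (editorial addition, not in the original source) of what
# the copy() override above guarantees; not called anywhere, and the node
# value is a made-up stand-in for a 20-byte hash.
def _excludeddir_copy_sketch():
    d = excludeddir('outside/', '\x01' * 20)
    # copy() hands back the very same object, so walking a tree with many
    # excluded directories allocates nothing extra for them...
    assert d.copy() is d
    # ...and the stand-in keeps the placeholder entry installed by
    # __init__(), flagged 't' so it reads as a subdirectory.
    assert d._files[''] == '\x01' * 20
    assert d._flags[''] == 't'
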
class excludeddirmanifestctx(treemanifestctx):
    """context wrapper for excludeddir - see that docstring for rationale"""
    def __init__(self, dir, node):
        self._dir = dir
        self._node = node

    def read(self):
        return excludeddir(self._dir, self._node)

    def write(self, *args):
        raise error.ProgrammingError(
            'attempt to write manifest from excluded dir %s' % self._dir)

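# A hedged sketch (editorial addition, not in the original source) of the
# context's contract: reads produce the stand-in, writes are rejected as
# programming errors. Not called anywhere; the arguments are made up.
def _excludeddirmanifestctx_sketch():
    mfctx = excludeddirmanifestctx('outside/', '\x01' * 20)
    stub = mfctx.read()
    assert isinstance(stub, excludeddir)
    assert stub._node == '\x01' * 20
    try:
        mfctx.write(None, None)
    except error.ProgrammingError:
        pass  # expected: writing from an excluded dir is always a bug
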
class excludedmanifestrevlog(manifestrevlog):
    """Stand-in for excluded treemanifest revlogs.

    When narrowing is active on a treemanifest repository, we'll have
    references to directories we can't see due to the revlog being
    skipped. This class exists to conform to the manifestrevlog
    interface for those directories and proactively prevent writes
    outside the narrowspec.
    """

    def __init__(self, dir):
        self._dir = dir

    def __len__(self):
        raise error.ProgrammingError(
            'attempt to get length of excluded dir %s' % self._dir)

    def rev(self, node):
        raise error.ProgrammingError(
            'attempt to get rev from excluded dir %s' % self._dir)

    def linkrev(self, node):
        raise error.ProgrammingError(
            'attempt to get linkrev from excluded dir %s' % self._dir)

    def node(self, rev):
        raise error.ProgrammingError(
            'attempt to get node from excluded dir %s' % self._dir)

    def add(self, *args, **kwargs):
        # We should never write entries in dirlogs outside the narrow clone.
        # However, the method still gets called from writesubtree() in
        # _addtree(), so we need to handle it. We should possibly make that
        # code avoid calling add() with a clean manifest (_dirty is always
        # False in excludeddir instances).
        pass
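
# A hedged sketch (editorial addition, not in the original source) of the
# guard behaviour described in the docstring and comment above: every read
# accessor fails loudly, while the add() call made by writesubtree() is
# tolerated as a no-op. Not called anywhere; the arguments are made up.
def _excludedmanifestrevlog_sketch():
    rl = excludedmanifestrevlog('outside/')
    for accessor in (lambda: len(rl),
                     lambda: rl.rev('\x01' * 20),
                     lambda: rl.linkrev('\x01' * 20),
                     lambda: rl.node(0)):
        try:
            accessor()
        except error.ProgrammingError:
            pass  # expected: reading outside the narrowspec is a bug
    rl.add()  # silently ignored instead of writing an excluded dirlog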