revlog: add an experimental option to mitigate delta issues (issue5480)...
marmoute
r33202:895ecec3 default
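The diff below wires a new experimental setting, experimental.maxdeltachainspan, into localrepo._applyopenerreqs(): the value is read with ui.configbytes() and, when non-negative, forwarded to the store opener as svfs.options['maxdeltachainspan']. The following is a minimal sketch of how that knob can be exercised; the 1.5 GB figure, the 'example' source label and the printed messages are illustrative only, while the config path and the -1 "disabled" default are taken from the diff itself. configbytes() parses sizes via util.sizetoint(), so plain byte counts and k/m/g suffixed values are both accepted.

# minimal sketch, assuming a Mercurial installation of this era is importable
from mercurial import ui as uimod

u = uimod.ui.load()
# roughly equivalent to running hg with --config experimental.maxdeltachainspan=1.5GB
u.setconfig(b'experimental', b'maxdeltachainspan', b'1.5 GB', b'example')

chainspan = u.configbytes(b'experimental', b'maxdeltachainspan', -1)
if 0 <= chainspan:
    # this mirrors what _applyopenerreqs() now stores into self.svfs.options
    print('capping delta chain span at %d bytes' % chainspan)
else:
    print('maxdeltachainspan is unset; delta chains are not span-limited')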
@@ -1,2107 +1,2110 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 branchmap,
26 branchmap,
27 bundle2,
27 bundle2,
28 changegroup,
28 changegroup,
29 changelog,
29 changelog,
30 color,
30 color,
31 context,
31 context,
32 dirstate,
32 dirstate,
33 dirstateguard,
33 dirstateguard,
34 encoding,
34 encoding,
35 error,
35 error,
36 exchange,
36 exchange,
37 extensions,
37 extensions,
38 filelog,
38 filelog,
39 hook,
39 hook,
40 lock as lockmod,
40 lock as lockmod,
41 manifest,
41 manifest,
42 match as matchmod,
42 match as matchmod,
43 merge as mergemod,
43 merge as mergemod,
44 mergeutil,
44 mergeutil,
45 namespaces,
45 namespaces,
46 obsolete,
46 obsolete,
47 pathutil,
47 pathutil,
48 peer,
48 peer,
49 phases,
49 phases,
50 pushkey,
50 pushkey,
51 pycompat,
51 pycompat,
52 repoview,
52 repoview,
53 revset,
53 revset,
54 revsetlang,
54 revsetlang,
55 scmutil,
55 scmutil,
56 store,
56 store,
57 subrepo,
57 subrepo,
58 tags as tagsmod,
58 tags as tagsmod,
59 transaction,
59 transaction,
60 txnutil,
60 txnutil,
61 util,
61 util,
62 vfs as vfsmod,
62 vfs as vfsmod,
63 )
63 )
64
64
65 release = lockmod.release
65 release = lockmod.release
66 urlerr = util.urlerr
66 urlerr = util.urlerr
67 urlreq = util.urlreq
67 urlreq = util.urlreq
68
68
69 class _basefilecache(scmutil.filecache):
69 class _basefilecache(scmutil.filecache):
70 """All filecache usage on repo are done for logic that should be unfiltered
70 """All filecache usage on repo are done for logic that should be unfiltered
71 """
71 """
72 def __get__(self, repo, type=None):
72 def __get__(self, repo, type=None):
73 if repo is None:
73 if repo is None:
74 return self
74 return self
75 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
75 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
76 def __set__(self, repo, value):
76 def __set__(self, repo, value):
77 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
77 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
78 def __delete__(self, repo):
78 def __delete__(self, repo):
79 return super(_basefilecache, self).__delete__(repo.unfiltered())
79 return super(_basefilecache, self).__delete__(repo.unfiltered())
80
80
81 class repofilecache(_basefilecache):
81 class repofilecache(_basefilecache):
82 """filecache for files in .hg but outside of .hg/store"""
82 """filecache for files in .hg but outside of .hg/store"""
83 def join(self, obj, fname):
83 def join(self, obj, fname):
84 return obj.vfs.join(fname)
84 return obj.vfs.join(fname)
85
85
86 class storecache(_basefilecache):
86 class storecache(_basefilecache):
87 """filecache for files in the store"""
87 """filecache for files in the store"""
88 def join(self, obj, fname):
88 def join(self, obj, fname):
89 return obj.sjoin(fname)
89 return obj.sjoin(fname)
90
90
91 class unfilteredpropertycache(util.propertycache):
91 class unfilteredpropertycache(util.propertycache):
92 """propertycache that apply to unfiltered repo only"""
92 """propertycache that apply to unfiltered repo only"""
93
93
94 def __get__(self, repo, type=None):
94 def __get__(self, repo, type=None):
95 unfi = repo.unfiltered()
95 unfi = repo.unfiltered()
96 if unfi is repo:
96 if unfi is repo:
97 return super(unfilteredpropertycache, self).__get__(unfi)
97 return super(unfilteredpropertycache, self).__get__(unfi)
98 return getattr(unfi, self.name)
98 return getattr(unfi, self.name)
99
99
100 class filteredpropertycache(util.propertycache):
100 class filteredpropertycache(util.propertycache):
101 """propertycache that must take filtering in account"""
101 """propertycache that must take filtering in account"""
102
102
103 def cachevalue(self, obj, value):
103 def cachevalue(self, obj, value):
104 object.__setattr__(obj, self.name, value)
104 object.__setattr__(obj, self.name, value)
105
105
106
106
107 def hasunfilteredcache(repo, name):
107 def hasunfilteredcache(repo, name):
108 """check if a repo has an unfilteredpropertycache value for <name>"""
108 """check if a repo has an unfilteredpropertycache value for <name>"""
109 return name in vars(repo.unfiltered())
109 return name in vars(repo.unfiltered())
110
110
111 def unfilteredmethod(orig):
111 def unfilteredmethod(orig):
112 """decorate method that always need to be run on unfiltered version"""
112 """decorate method that always need to be run on unfiltered version"""
113 def wrapper(repo, *args, **kwargs):
113 def wrapper(repo, *args, **kwargs):
114 return orig(repo.unfiltered(), *args, **kwargs)
114 return orig(repo.unfiltered(), *args, **kwargs)
115 return wrapper
115 return wrapper
116
116
117 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
117 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
118 'unbundle'}
118 'unbundle'}
119 legacycaps = moderncaps.union({'changegroupsubset'})
119 legacycaps = moderncaps.union({'changegroupsubset'})
120
120
121 class localpeer(peer.peerrepository):
121 class localpeer(peer.peerrepository):
122 '''peer for a local repo; reflects only the most recent API'''
122 '''peer for a local repo; reflects only the most recent API'''
123
123
124 def __init__(self, repo, caps=None):
124 def __init__(self, repo, caps=None):
125 if caps is None:
125 if caps is None:
126 caps = moderncaps.copy()
126 caps = moderncaps.copy()
127 peer.peerrepository.__init__(self)
127 peer.peerrepository.__init__(self)
128 self._repo = repo.filtered('served')
128 self._repo = repo.filtered('served')
129 self.ui = repo.ui
129 self.ui = repo.ui
130 self._caps = repo._restrictcapabilities(caps)
130 self._caps = repo._restrictcapabilities(caps)
131 self.requirements = repo.requirements
131 self.requirements = repo.requirements
132 self.supportedformats = repo.supportedformats
132 self.supportedformats = repo.supportedformats
133
133
134 def close(self):
134 def close(self):
135 self._repo.close()
135 self._repo.close()
136
136
137 def _capabilities(self):
137 def _capabilities(self):
138 return self._caps
138 return self._caps
139
139
140 def local(self):
140 def local(self):
141 return self._repo
141 return self._repo
142
142
143 def canpush(self):
143 def canpush(self):
144 return True
144 return True
145
145
146 def url(self):
146 def url(self):
147 return self._repo.url()
147 return self._repo.url()
148
148
149 def lookup(self, key):
149 def lookup(self, key):
150 return self._repo.lookup(key)
150 return self._repo.lookup(key)
151
151
152 def branchmap(self):
152 def branchmap(self):
153 return self._repo.branchmap()
153 return self._repo.branchmap()
154
154
155 def heads(self):
155 def heads(self):
156 return self._repo.heads()
156 return self._repo.heads()
157
157
158 def known(self, nodes):
158 def known(self, nodes):
159 return self._repo.known(nodes)
159 return self._repo.known(nodes)
160
160
161 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
161 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
162 **kwargs):
162 **kwargs):
163 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
163 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
164 common=common, bundlecaps=bundlecaps,
164 common=common, bundlecaps=bundlecaps,
165 **kwargs)
165 **kwargs)
166 cb = util.chunkbuffer(chunks)
166 cb = util.chunkbuffer(chunks)
167
167
168 if exchange.bundle2requested(bundlecaps):
168 if exchange.bundle2requested(bundlecaps):
169 # When requesting a bundle2, getbundle returns a stream to make the
169 # When requesting a bundle2, getbundle returns a stream to make the
170 # wire level function happier. We need to build a proper object
170 # wire level function happier. We need to build a proper object
171 # from it in local peer.
171 # from it in local peer.
172 return bundle2.getunbundler(self.ui, cb)
172 return bundle2.getunbundler(self.ui, cb)
173 else:
173 else:
174 return changegroup.getunbundler('01', cb, None)
174 return changegroup.getunbundler('01', cb, None)
175
175
176 # TODO We might want to move the next two calls into legacypeer and add
176 # TODO We might want to move the next two calls into legacypeer and add
177 # unbundle instead.
177 # unbundle instead.
178
178
179 def unbundle(self, cg, heads, url):
179 def unbundle(self, cg, heads, url):
180 """apply a bundle on a repo
180 """apply a bundle on a repo
181
181
182 This function handles the repo locking itself."""
182 This function handles the repo locking itself."""
183 try:
183 try:
184 try:
184 try:
185 cg = exchange.readbundle(self.ui, cg, None)
185 cg = exchange.readbundle(self.ui, cg, None)
186 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
186 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
187 if util.safehasattr(ret, 'getchunks'):
187 if util.safehasattr(ret, 'getchunks'):
188 # This is a bundle20 object, turn it into an unbundler.
188 # This is a bundle20 object, turn it into an unbundler.
189 # This little dance should be dropped eventually when the
189 # This little dance should be dropped eventually when the
190 # API is finally improved.
190 # API is finally improved.
191 stream = util.chunkbuffer(ret.getchunks())
191 stream = util.chunkbuffer(ret.getchunks())
192 ret = bundle2.getunbundler(self.ui, stream)
192 ret = bundle2.getunbundler(self.ui, stream)
193 return ret
193 return ret
194 except Exception as exc:
194 except Exception as exc:
195 # If the exception contains output salvaged from a bundle2
195 # If the exception contains output salvaged from a bundle2
196 # reply, we need to make sure it is printed before continuing
196 # reply, we need to make sure it is printed before continuing
197 # to fail. So we build a bundle2 with such output and consume
197 # to fail. So we build a bundle2 with such output and consume
198 # it directly.
198 # it directly.
199 #
199 #
200 # This is not very elegant but allows a "simple" solution for
200 # This is not very elegant but allows a "simple" solution for
201 # issue4594
201 # issue4594
202 output = getattr(exc, '_bundle2salvagedoutput', ())
202 output = getattr(exc, '_bundle2salvagedoutput', ())
203 if output:
203 if output:
204 bundler = bundle2.bundle20(self._repo.ui)
204 bundler = bundle2.bundle20(self._repo.ui)
205 for out in output:
205 for out in output:
206 bundler.addpart(out)
206 bundler.addpart(out)
207 stream = util.chunkbuffer(bundler.getchunks())
207 stream = util.chunkbuffer(bundler.getchunks())
208 b = bundle2.getunbundler(self.ui, stream)
208 b = bundle2.getunbundler(self.ui, stream)
209 bundle2.processbundle(self._repo, b)
209 bundle2.processbundle(self._repo, b)
210 raise
210 raise
211 except error.PushRaced as exc:
211 except error.PushRaced as exc:
212 raise error.ResponseError(_('push failed:'), str(exc))
212 raise error.ResponseError(_('push failed:'), str(exc))
213
213
214 def lock(self):
214 def lock(self):
215 return self._repo.lock()
215 return self._repo.lock()
216
216
217 def pushkey(self, namespace, key, old, new):
217 def pushkey(self, namespace, key, old, new):
218 return self._repo.pushkey(namespace, key, old, new)
218 return self._repo.pushkey(namespace, key, old, new)
219
219
220 def listkeys(self, namespace):
220 def listkeys(self, namespace):
221 return self._repo.listkeys(namespace)
221 return self._repo.listkeys(namespace)
222
222
223 def debugwireargs(self, one, two, three=None, four=None, five=None):
223 def debugwireargs(self, one, two, three=None, four=None, five=None):
224 '''used to test argument passing over the wire'''
224 '''used to test argument passing over the wire'''
225 return "%s %s %s %s %s" % (one, two, three, four, five)
225 return "%s %s %s %s %s" % (one, two, three, four, five)
226
226
227 class locallegacypeer(localpeer):
227 class locallegacypeer(localpeer):
228 '''peer extension which implements legacy methods too; used for tests with
228 '''peer extension which implements legacy methods too; used for tests with
229 restricted capabilities'''
229 restricted capabilities'''
230
230
231 def __init__(self, repo):
231 def __init__(self, repo):
232 localpeer.__init__(self, repo, caps=legacycaps)
232 localpeer.__init__(self, repo, caps=legacycaps)
233
233
234 def branches(self, nodes):
234 def branches(self, nodes):
235 return self._repo.branches(nodes)
235 return self._repo.branches(nodes)
236
236
237 def between(self, pairs):
237 def between(self, pairs):
238 return self._repo.between(pairs)
238 return self._repo.between(pairs)
239
239
240 def changegroup(self, basenodes, source):
240 def changegroup(self, basenodes, source):
241 return changegroup.changegroup(self._repo, basenodes, source)
241 return changegroup.changegroup(self._repo, basenodes, source)
242
242
243 def changegroupsubset(self, bases, heads, source):
243 def changegroupsubset(self, bases, heads, source):
244 return changegroup.changegroupsubset(self._repo, bases, heads, source)
244 return changegroup.changegroupsubset(self._repo, bases, heads, source)
245
245
246 # Increment the sub-version when the revlog v2 format changes to lock out old
246 # Increment the sub-version when the revlog v2 format changes to lock out old
247 # clients.
247 # clients.
248 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
248 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
249
249
250 class localrepository(object):
250 class localrepository(object):
251
251
252 supportedformats = {
252 supportedformats = {
253 'revlogv1',
253 'revlogv1',
254 'generaldelta',
254 'generaldelta',
255 'treemanifest',
255 'treemanifest',
256 'manifestv2',
256 'manifestv2',
257 REVLOGV2_REQUIREMENT,
257 REVLOGV2_REQUIREMENT,
258 }
258 }
259 _basesupported = supportedformats | {
259 _basesupported = supportedformats | {
260 'store',
260 'store',
261 'fncache',
261 'fncache',
262 'shared',
262 'shared',
263 'relshared',
263 'relshared',
264 'dotencode',
264 'dotencode',
265 }
265 }
266 openerreqs = {
266 openerreqs = {
267 'revlogv1',
267 'revlogv1',
268 'generaldelta',
268 'generaldelta',
269 'treemanifest',
269 'treemanifest',
270 'manifestv2',
270 'manifestv2',
271 }
271 }
272
272
273 # a list of (ui, featureset) functions.
273 # a list of (ui, featureset) functions.
274 # only functions defined in module of enabled extensions are invoked
274 # only functions defined in module of enabled extensions are invoked
275 featuresetupfuncs = set()
275 featuresetupfuncs = set()
276
276
277 def __init__(self, baseui, path, create=False):
277 def __init__(self, baseui, path, create=False):
278 self.requirements = set()
278 self.requirements = set()
279 self.filtername = None
279 self.filtername = None
280 # wvfs: rooted at the repository root, used to access the working copy
280 # wvfs: rooted at the repository root, used to access the working copy
281 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
281 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
282 # vfs: rooted at .hg, used to access repo files outside of .hg/store
282 # vfs: rooted at .hg, used to access repo files outside of .hg/store
283 self.vfs = None
283 self.vfs = None
284 # svfs: usually rooted at .hg/store, used to access repository history
284 # svfs: usually rooted at .hg/store, used to access repository history
285 # If this is a shared repository, this vfs may point to another
285 # If this is a shared repository, this vfs may point to another
286 # repository's .hg/store directory.
286 # repository's .hg/store directory.
287 self.svfs = None
287 self.svfs = None
288 self.root = self.wvfs.base
288 self.root = self.wvfs.base
289 self.path = self.wvfs.join(".hg")
289 self.path = self.wvfs.join(".hg")
290 self.origroot = path
290 self.origroot = path
291 self.auditor = pathutil.pathauditor(self.root, self._checknested)
291 self.auditor = pathutil.pathauditor(self.root, self._checknested)
292 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
292 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
293 realfs=False)
293 realfs=False)
294 self.vfs = vfsmod.vfs(self.path)
294 self.vfs = vfsmod.vfs(self.path)
295 self.baseui = baseui
295 self.baseui = baseui
296 self.ui = baseui.copy()
296 self.ui = baseui.copy()
297 self.ui.copy = baseui.copy # prevent copying repo configuration
297 self.ui.copy = baseui.copy # prevent copying repo configuration
298 # A list of callback to shape the phase if no data were found.
298 # A list of callback to shape the phase if no data were found.
299 # Callback are in the form: func(repo, roots) --> processed root.
299 # Callback are in the form: func(repo, roots) --> processed root.
300 # This list it to be filled by extension during repo setup
300 # This list it to be filled by extension during repo setup
301 self._phasedefaults = []
301 self._phasedefaults = []
302 try:
302 try:
303 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
303 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
304 self._loadextensions()
304 self._loadextensions()
305 except IOError:
305 except IOError:
306 pass
306 pass
307
307
308 if self.featuresetupfuncs:
308 if self.featuresetupfuncs:
309 self.supported = set(self._basesupported) # use private copy
309 self.supported = set(self._basesupported) # use private copy
310 extmods = set(m.__name__ for n, m
310 extmods = set(m.__name__ for n, m
311 in extensions.extensions(self.ui))
311 in extensions.extensions(self.ui))
312 for setupfunc in self.featuresetupfuncs:
312 for setupfunc in self.featuresetupfuncs:
313 if setupfunc.__module__ in extmods:
313 if setupfunc.__module__ in extmods:
314 setupfunc(self.ui, self.supported)
314 setupfunc(self.ui, self.supported)
315 else:
315 else:
316 self.supported = self._basesupported
316 self.supported = self._basesupported
317 color.setup(self.ui)
317 color.setup(self.ui)
318
318
319 # Add compression engines.
319 # Add compression engines.
320 for name in util.compengines:
320 for name in util.compengines:
321 engine = util.compengines[name]
321 engine = util.compengines[name]
322 if engine.revlogheader():
322 if engine.revlogheader():
323 self.supported.add('exp-compression-%s' % name)
323 self.supported.add('exp-compression-%s' % name)
324
324
325 if not self.vfs.isdir():
325 if not self.vfs.isdir():
326 if create:
326 if create:
327 self.requirements = newreporequirements(self)
327 self.requirements = newreporequirements(self)
328
328
329 if not self.wvfs.exists():
329 if not self.wvfs.exists():
330 self.wvfs.makedirs()
330 self.wvfs.makedirs()
331 self.vfs.makedir(notindexed=True)
331 self.vfs.makedir(notindexed=True)
332
332
333 if 'store' in self.requirements:
333 if 'store' in self.requirements:
334 self.vfs.mkdir("store")
334 self.vfs.mkdir("store")
335
335
336 # create an invalid changelog
336 # create an invalid changelog
337 self.vfs.append(
337 self.vfs.append(
338 "00changelog.i",
338 "00changelog.i",
339 '\0\0\0\2' # represents revlogv2
339 '\0\0\0\2' # represents revlogv2
340 ' dummy changelog to prevent using the old repo layout'
340 ' dummy changelog to prevent using the old repo layout'
341 )
341 )
342 else:
342 else:
343 raise error.RepoError(_("repository %s not found") % path)
343 raise error.RepoError(_("repository %s not found") % path)
344 elif create:
344 elif create:
345 raise error.RepoError(_("repository %s already exists") % path)
345 raise error.RepoError(_("repository %s already exists") % path)
346 else:
346 else:
347 try:
347 try:
348 self.requirements = scmutil.readrequires(
348 self.requirements = scmutil.readrequires(
349 self.vfs, self.supported)
349 self.vfs, self.supported)
350 except IOError as inst:
350 except IOError as inst:
351 if inst.errno != errno.ENOENT:
351 if inst.errno != errno.ENOENT:
352 raise
352 raise
353
353
354 self.sharedpath = self.path
354 self.sharedpath = self.path
355 try:
355 try:
356 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
356 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
357 if 'relshared' in self.requirements:
357 if 'relshared' in self.requirements:
358 sharedpath = self.vfs.join(sharedpath)
358 sharedpath = self.vfs.join(sharedpath)
359 vfs = vfsmod.vfs(sharedpath, realpath=True)
359 vfs = vfsmod.vfs(sharedpath, realpath=True)
360 s = vfs.base
360 s = vfs.base
361 if not vfs.exists():
361 if not vfs.exists():
362 raise error.RepoError(
362 raise error.RepoError(
363 _('.hg/sharedpath points to nonexistent directory %s') % s)
363 _('.hg/sharedpath points to nonexistent directory %s') % s)
364 self.sharedpath = s
364 self.sharedpath = s
365 except IOError as inst:
365 except IOError as inst:
366 if inst.errno != errno.ENOENT:
366 if inst.errno != errno.ENOENT:
367 raise
367 raise
368
368
369 self.store = store.store(
369 self.store = store.store(
370 self.requirements, self.sharedpath, vfsmod.vfs)
370 self.requirements, self.sharedpath, vfsmod.vfs)
371 self.spath = self.store.path
371 self.spath = self.store.path
372 self.svfs = self.store.vfs
372 self.svfs = self.store.vfs
373 self.sjoin = self.store.join
373 self.sjoin = self.store.join
374 self.vfs.createmode = self.store.createmode
374 self.vfs.createmode = self.store.createmode
375 self._applyopenerreqs()
375 self._applyopenerreqs()
376 if create:
376 if create:
377 self._writerequirements()
377 self._writerequirements()
378
378
379 self._dirstatevalidatewarned = False
379 self._dirstatevalidatewarned = False
380
380
381 self._branchcaches = {}
381 self._branchcaches = {}
382 self._revbranchcache = None
382 self._revbranchcache = None
383 self.filterpats = {}
383 self.filterpats = {}
384 self._datafilters = {}
384 self._datafilters = {}
385 self._transref = self._lockref = self._wlockref = None
385 self._transref = self._lockref = self._wlockref = None
386
386
387 # A cache for various files under .hg/ that tracks file changes,
387 # A cache for various files under .hg/ that tracks file changes,
388 # (used by the filecache decorator)
388 # (used by the filecache decorator)
389 #
389 #
390 # Maps a property name to its util.filecacheentry
390 # Maps a property name to its util.filecacheentry
391 self._filecache = {}
391 self._filecache = {}
392
392
393 # hold sets of revision to be filtered
393 # hold sets of revision to be filtered
394 # should be cleared when something might have changed the filter value:
394 # should be cleared when something might have changed the filter value:
395 # - new changesets,
395 # - new changesets,
396 # - phase change,
396 # - phase change,
397 # - new obsolescence marker,
397 # - new obsolescence marker,
398 # - working directory parent change,
398 # - working directory parent change,
399 # - bookmark changes
399 # - bookmark changes
400 self.filteredrevcache = {}
400 self.filteredrevcache = {}
401
401
402 # post-dirstate-status hooks
402 # post-dirstate-status hooks
403 self._postdsstatus = []
403 self._postdsstatus = []
404
404
405 # generic mapping between names and nodes
405 # generic mapping between names and nodes
406 self.names = namespaces.namespaces()
406 self.names = namespaces.namespaces()
407
407
408 def close(self):
408 def close(self):
409 self._writecaches()
409 self._writecaches()
410
410
411 def _loadextensions(self):
411 def _loadextensions(self):
412 extensions.loadall(self.ui)
412 extensions.loadall(self.ui)
413
413
414 def _writecaches(self):
414 def _writecaches(self):
415 if self._revbranchcache:
415 if self._revbranchcache:
416 self._revbranchcache.write()
416 self._revbranchcache.write()
417
417
418 def _restrictcapabilities(self, caps):
418 def _restrictcapabilities(self, caps):
419 if self.ui.configbool('experimental', 'bundle2-advertise', True):
419 if self.ui.configbool('experimental', 'bundle2-advertise', True):
420 caps = set(caps)
420 caps = set(caps)
421 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
421 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
422 caps.add('bundle2=' + urlreq.quote(capsblob))
422 caps.add('bundle2=' + urlreq.quote(capsblob))
423 return caps
423 return caps
424
424
425 def _applyopenerreqs(self):
425 def _applyopenerreqs(self):
426 self.svfs.options = dict((r, 1) for r in self.requirements
426 self.svfs.options = dict((r, 1) for r in self.requirements
427 if r in self.openerreqs)
427 if r in self.openerreqs)
428 # experimental config: format.chunkcachesize
428 # experimental config: format.chunkcachesize
429 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
429 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
430 if chunkcachesize is not None:
430 if chunkcachesize is not None:
431 self.svfs.options['chunkcachesize'] = chunkcachesize
431 self.svfs.options['chunkcachesize'] = chunkcachesize
432 # experimental config: format.maxchainlen
432 # experimental config: format.maxchainlen
433 maxchainlen = self.ui.configint('format', 'maxchainlen')
433 maxchainlen = self.ui.configint('format', 'maxchainlen')
434 if maxchainlen is not None:
434 if maxchainlen is not None:
435 self.svfs.options['maxchainlen'] = maxchainlen
435 self.svfs.options['maxchainlen'] = maxchainlen
436 # experimental config: format.manifestcachesize
436 # experimental config: format.manifestcachesize
437 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
437 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
438 if manifestcachesize is not None:
438 if manifestcachesize is not None:
439 self.svfs.options['manifestcachesize'] = manifestcachesize
439 self.svfs.options['manifestcachesize'] = manifestcachesize
440 # experimental config: format.aggressivemergedeltas
440 # experimental config: format.aggressivemergedeltas
441 aggressivemergedeltas = self.ui.configbool('format',
441 aggressivemergedeltas = self.ui.configbool('format',
442 'aggressivemergedeltas', False)
442 'aggressivemergedeltas', False)
443 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
443 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
444 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
444 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
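# editorial note, not part of the upstream file: the three new-side lines
# below are the whole behavioural change of r33202. ui.configbytes() accepts
# suffixed sizes such as '1g', and the -1 default keeps the new limit
# switched off, so repositories that do not set
# experimental.maxdeltachainspan keep their previous delta-chain behaviour.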
445 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
446 if 0 <= chainspan:
447 self.svfs.options['maxdeltachainspan'] = chainspan
445
448
446 for r in self.requirements:
449 for r in self.requirements:
447 if r.startswith('exp-compression-'):
450 if r.startswith('exp-compression-'):
448 self.svfs.options['compengine'] = r[len('exp-compression-'):]
451 self.svfs.options['compengine'] = r[len('exp-compression-'):]
449
452
450 # TODO move "revlogv2" to openerreqs once finalized.
453 # TODO move "revlogv2" to openerreqs once finalized.
451 if REVLOGV2_REQUIREMENT in self.requirements:
454 if REVLOGV2_REQUIREMENT in self.requirements:
452 self.svfs.options['revlogv2'] = True
455 self.svfs.options['revlogv2'] = True
453
456
454 def _writerequirements(self):
457 def _writerequirements(self):
455 scmutil.writerequires(self.vfs, self.requirements)
458 scmutil.writerequires(self.vfs, self.requirements)
456
459
457 def _checknested(self, path):
460 def _checknested(self, path):
458 """Determine if path is a legal nested repository."""
461 """Determine if path is a legal nested repository."""
459 if not path.startswith(self.root):
462 if not path.startswith(self.root):
460 return False
463 return False
461 subpath = path[len(self.root) + 1:]
464 subpath = path[len(self.root) + 1:]
462 normsubpath = util.pconvert(subpath)
465 normsubpath = util.pconvert(subpath)
463
466
464 # XXX: Checking against the current working copy is wrong in
467 # XXX: Checking against the current working copy is wrong in
465 # the sense that it can reject things like
468 # the sense that it can reject things like
466 #
469 #
467 # $ hg cat -r 10 sub/x.txt
470 # $ hg cat -r 10 sub/x.txt
468 #
471 #
469 # if sub/ is no longer a subrepository in the working copy
472 # if sub/ is no longer a subrepository in the working copy
470 # parent revision.
473 # parent revision.
471 #
474 #
472 # However, it can of course also allow things that would have
475 # However, it can of course also allow things that would have
473 # been rejected before, such as the above cat command if sub/
476 # been rejected before, such as the above cat command if sub/
474 # is a subrepository now, but was a normal directory before.
477 # is a subrepository now, but was a normal directory before.
475 # The old path auditor would have rejected by mistake since it
478 # The old path auditor would have rejected by mistake since it
476 # panics when it sees sub/.hg/.
479 # panics when it sees sub/.hg/.
477 #
480 #
478 # All in all, checking against the working copy seems sensible
481 # All in all, checking against the working copy seems sensible
479 # since we want to prevent access to nested repositories on
482 # since we want to prevent access to nested repositories on
480 # the filesystem *now*.
483 # the filesystem *now*.
481 ctx = self[None]
484 ctx = self[None]
482 parts = util.splitpath(subpath)
485 parts = util.splitpath(subpath)
483 while parts:
486 while parts:
484 prefix = '/'.join(parts)
487 prefix = '/'.join(parts)
485 if prefix in ctx.substate:
488 if prefix in ctx.substate:
486 if prefix == normsubpath:
489 if prefix == normsubpath:
487 return True
490 return True
488 else:
491 else:
489 sub = ctx.sub(prefix)
492 sub = ctx.sub(prefix)
490 return sub.checknested(subpath[len(prefix) + 1:])
493 return sub.checknested(subpath[len(prefix) + 1:])
491 else:
494 else:
492 parts.pop()
495 parts.pop()
493 return False
496 return False
494
497
495 def peer(self):
498 def peer(self):
496 return localpeer(self) # not cached to avoid reference cycle
499 return localpeer(self) # not cached to avoid reference cycle
497
500
498 def unfiltered(self):
501 def unfiltered(self):
499 """Return unfiltered version of the repository
502 """Return unfiltered version of the repository
500
503
501 Intended to be overwritten by filtered repo."""
504 Intended to be overwritten by filtered repo."""
502 return self
505 return self
503
506
504 def filtered(self, name):
507 def filtered(self, name):
505 """Return a filtered version of a repository"""
508 """Return a filtered version of a repository"""
506 # build a new class with the mixin and the current class
509 # build a new class with the mixin and the current class
507 # (possibly subclass of the repo)
510 # (possibly subclass of the repo)
508 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
511 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
509 pass
512 pass
510 return filteredrepo(self, name)
513 return filteredrepo(self, name)
511
514
512 @repofilecache('bookmarks', 'bookmarks.current')
515 @repofilecache('bookmarks', 'bookmarks.current')
513 def _bookmarks(self):
516 def _bookmarks(self):
514 return bookmarks.bmstore(self)
517 return bookmarks.bmstore(self)
515
518
516 @property
519 @property
517 def _activebookmark(self):
520 def _activebookmark(self):
518 return self._bookmarks.active
521 return self._bookmarks.active
519
522
520 # _phaserevs and _phasesets depend on changelog. what we need is to
523 # _phaserevs and _phasesets depend on changelog. what we need is to
521 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
524 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
522 # can't be easily expressed in filecache mechanism.
525 # can't be easily expressed in filecache mechanism.
523 @storecache('phaseroots', '00changelog.i')
526 @storecache('phaseroots', '00changelog.i')
524 def _phasecache(self):
527 def _phasecache(self):
525 return phases.phasecache(self, self._phasedefaults)
528 return phases.phasecache(self, self._phasedefaults)
526
529
527 @storecache('obsstore')
530 @storecache('obsstore')
528 def obsstore(self):
531 def obsstore(self):
529 return obsolete.makestore(self.ui, self)
532 return obsolete.makestore(self.ui, self)
530
533
531 @storecache('00changelog.i')
534 @storecache('00changelog.i')
532 def changelog(self):
535 def changelog(self):
533 return changelog.changelog(self.svfs,
536 return changelog.changelog(self.svfs,
534 trypending=txnutil.mayhavepending(self.root))
537 trypending=txnutil.mayhavepending(self.root))
535
538
536 def _constructmanifest(self):
539 def _constructmanifest(self):
537 # This is a temporary function while we migrate from manifest to
540 # This is a temporary function while we migrate from manifest to
538 # manifestlog. It allows bundlerepo and unionrepo to intercept the
541 # manifestlog. It allows bundlerepo and unionrepo to intercept the
539 # manifest creation.
542 # manifest creation.
540 return manifest.manifestrevlog(self.svfs)
543 return manifest.manifestrevlog(self.svfs)
541
544
542 @storecache('00manifest.i')
545 @storecache('00manifest.i')
543 def manifestlog(self):
546 def manifestlog(self):
544 return manifest.manifestlog(self.svfs, self)
547 return manifest.manifestlog(self.svfs, self)
545
548
546 @repofilecache('dirstate')
549 @repofilecache('dirstate')
547 def dirstate(self):
550 def dirstate(self):
548 return dirstate.dirstate(self.vfs, self.ui, self.root,
551 return dirstate.dirstate(self.vfs, self.ui, self.root,
549 self._dirstatevalidate)
552 self._dirstatevalidate)
550
553
551 def _dirstatevalidate(self, node):
554 def _dirstatevalidate(self, node):
552 try:
555 try:
553 self.changelog.rev(node)
556 self.changelog.rev(node)
554 return node
557 return node
555 except error.LookupError:
558 except error.LookupError:
556 if not self._dirstatevalidatewarned:
559 if not self._dirstatevalidatewarned:
557 self._dirstatevalidatewarned = True
560 self._dirstatevalidatewarned = True
558 self.ui.warn(_("warning: ignoring unknown"
561 self.ui.warn(_("warning: ignoring unknown"
559 " working parent %s!\n") % short(node))
562 " working parent %s!\n") % short(node))
560 return nullid
563 return nullid
561
564
562 def __getitem__(self, changeid):
565 def __getitem__(self, changeid):
563 if changeid is None:
566 if changeid is None:
564 return context.workingctx(self)
567 return context.workingctx(self)
565 if isinstance(changeid, slice):
568 if isinstance(changeid, slice):
566 # wdirrev isn't contiguous so the slice shouldn't include it
569 # wdirrev isn't contiguous so the slice shouldn't include it
567 return [context.changectx(self, i)
570 return [context.changectx(self, i)
568 for i in xrange(*changeid.indices(len(self)))
571 for i in xrange(*changeid.indices(len(self)))
569 if i not in self.changelog.filteredrevs]
572 if i not in self.changelog.filteredrevs]
570 try:
573 try:
571 return context.changectx(self, changeid)
574 return context.changectx(self, changeid)
572 except error.WdirUnsupported:
575 except error.WdirUnsupported:
573 return context.workingctx(self)
576 return context.workingctx(self)
574
577
575 def __contains__(self, changeid):
578 def __contains__(self, changeid):
576 """True if the given changeid exists
579 """True if the given changeid exists
577
580
578 error.LookupError is raised if an ambiguous node specified.
581 error.LookupError is raised if an ambiguous node specified.
579 """
582 """
580 try:
583 try:
581 self[changeid]
584 self[changeid]
582 return True
585 return True
583 except error.RepoLookupError:
586 except error.RepoLookupError:
584 return False
587 return False
585
588
586 def __nonzero__(self):
589 def __nonzero__(self):
587 return True
590 return True
588
591
589 __bool__ = __nonzero__
592 __bool__ = __nonzero__
590
593
591 def __len__(self):
594 def __len__(self):
592 return len(self.changelog)
595 return len(self.changelog)
593
596
594 def __iter__(self):
597 def __iter__(self):
595 return iter(self.changelog)
598 return iter(self.changelog)
596
599
597 def revs(self, expr, *args):
600 def revs(self, expr, *args):
598 '''Find revisions matching a revset.
601 '''Find revisions matching a revset.
599
602
600 The revset is specified as a string ``expr`` that may contain
603 The revset is specified as a string ``expr`` that may contain
601 %-formatting to escape certain types. See ``revsetlang.formatspec``.
604 %-formatting to escape certain types. See ``revsetlang.formatspec``.
602
605
603 Revset aliases from the configuration are not expanded. To expand
606 Revset aliases from the configuration are not expanded. To expand
604 user aliases, consider calling ``scmutil.revrange()`` or
607 user aliases, consider calling ``scmutil.revrange()`` or
605 ``repo.anyrevs([expr], user=True)``.
608 ``repo.anyrevs([expr], user=True)``.
606
609
607 Returns a revset.abstractsmartset, which is a list-like interface
610 Returns a revset.abstractsmartset, which is a list-like interface
608 that contains integer revisions.
611 that contains integer revisions.
609 '''
612 '''
610 expr = revsetlang.formatspec(expr, *args)
613 expr = revsetlang.formatspec(expr, *args)
611 m = revset.match(None, expr)
614 m = revset.match(None, expr)
612 return m(self)
615 return m(self)
613
616
614 def set(self, expr, *args):
617 def set(self, expr, *args):
615 '''Find revisions matching a revset and emit changectx instances.
618 '''Find revisions matching a revset and emit changectx instances.
616
619
617 This is a convenience wrapper around ``revs()`` that iterates the
620 This is a convenience wrapper around ``revs()`` that iterates the
618 result and is a generator of changectx instances.
621 result and is a generator of changectx instances.
619
622
620 Revset aliases from the configuration are not expanded. To expand
623 Revset aliases from the configuration are not expanded. To expand
621 user aliases, consider calling ``scmutil.revrange()``.
624 user aliases, consider calling ``scmutil.revrange()``.
622 '''
625 '''
623 for r in self.revs(expr, *args):
626 for r in self.revs(expr, *args):
624 yield self[r]
627 yield self[r]
625
628
626 def anyrevs(self, specs, user=False):
629 def anyrevs(self, specs, user=False):
627 '''Find revisions matching one of the given revsets.
630 '''Find revisions matching one of the given revsets.
628
631
629 Revset aliases from the configuration are not expanded by default. To
632 Revset aliases from the configuration are not expanded by default. To
630 expand user aliases, specify ``user=True``.
633 expand user aliases, specify ``user=True``.
631 '''
634 '''
632 if user:
635 if user:
633 m = revset.matchany(self.ui, specs, repo=self)
636 m = revset.matchany(self.ui, specs, repo=self)
634 else:
637 else:
635 m = revset.matchany(None, specs)
638 m = revset.matchany(None, specs)
636 return m(self)
639 return m(self)
637
640
638 def url(self):
641 def url(self):
639 return 'file:' + self.root
642 return 'file:' + self.root
640
643
641 def hook(self, name, throw=False, **args):
644 def hook(self, name, throw=False, **args):
642 """Call a hook, passing this repo instance.
645 """Call a hook, passing this repo instance.
643
646
644 This a convenience method to aid invoking hooks. Extensions likely
647 This a convenience method to aid invoking hooks. Extensions likely
645 won't call this unless they have registered a custom hook or are
648 won't call this unless they have registered a custom hook or are
646 replacing code that is expected to call a hook.
649 replacing code that is expected to call a hook.
647 """
650 """
648 return hook.hook(self.ui, self, name, throw, **args)
651 return hook.hook(self.ui, self, name, throw, **args)
649
652
650 @filteredpropertycache
653 @filteredpropertycache
651 def _tagscache(self):
654 def _tagscache(self):
652 '''Returns a tagscache object that contains various tags related
655 '''Returns a tagscache object that contains various tags related
653 caches.'''
656 caches.'''
654
657
655 # This simplifies its cache management by having one decorated
658 # This simplifies its cache management by having one decorated
656 # function (this one) and the rest simply fetch things from it.
659 # function (this one) and the rest simply fetch things from it.
657 class tagscache(object):
660 class tagscache(object):
658 def __init__(self):
661 def __init__(self):
659 # These two define the set of tags for this repository. tags
662 # These two define the set of tags for this repository. tags
660 # maps tag name to node; tagtypes maps tag name to 'global' or
663 # maps tag name to node; tagtypes maps tag name to 'global' or
661 # 'local'. (Global tags are defined by .hgtags across all
664 # 'local'. (Global tags are defined by .hgtags across all
662 # heads, and local tags are defined in .hg/localtags.)
665 # heads, and local tags are defined in .hg/localtags.)
663 # They constitute the in-memory cache of tags.
666 # They constitute the in-memory cache of tags.
664 self.tags = self.tagtypes = None
667 self.tags = self.tagtypes = None
665
668
666 self.nodetagscache = self.tagslist = None
669 self.nodetagscache = self.tagslist = None
667
670
668 cache = tagscache()
671 cache = tagscache()
669 cache.tags, cache.tagtypes = self._findtags()
672 cache.tags, cache.tagtypes = self._findtags()
670
673
671 return cache
674 return cache
672
675
673 def tags(self):
676 def tags(self):
674 '''return a mapping of tag to node'''
677 '''return a mapping of tag to node'''
675 t = {}
678 t = {}
676 if self.changelog.filteredrevs:
679 if self.changelog.filteredrevs:
677 tags, tt = self._findtags()
680 tags, tt = self._findtags()
678 else:
681 else:
679 tags = self._tagscache.tags
682 tags = self._tagscache.tags
680 for k, v in tags.iteritems():
683 for k, v in tags.iteritems():
681 try:
684 try:
682 # ignore tags to unknown nodes
685 # ignore tags to unknown nodes
683 self.changelog.rev(v)
686 self.changelog.rev(v)
684 t[k] = v
687 t[k] = v
685 except (error.LookupError, ValueError):
688 except (error.LookupError, ValueError):
686 pass
689 pass
687 return t
690 return t
688
691
689 def _findtags(self):
692 def _findtags(self):
690 '''Do the hard work of finding tags. Return a pair of dicts
693 '''Do the hard work of finding tags. Return a pair of dicts
691 (tags, tagtypes) where tags maps tag name to node, and tagtypes
694 (tags, tagtypes) where tags maps tag name to node, and tagtypes
692 maps tag name to a string like \'global\' or \'local\'.
695 maps tag name to a string like \'global\' or \'local\'.
693 Subclasses or extensions are free to add their own tags, but
696 Subclasses or extensions are free to add their own tags, but
694 should be aware that the returned dicts will be retained for the
697 should be aware that the returned dicts will be retained for the
695 duration of the localrepo object.'''
698 duration of the localrepo object.'''
696
699
697 # XXX what tagtype should subclasses/extensions use? Currently
700 # XXX what tagtype should subclasses/extensions use? Currently
698 # mq and bookmarks add tags, but do not set the tagtype at all.
701 # mq and bookmarks add tags, but do not set the tagtype at all.
699 # Should each extension invent its own tag type? Should there
702 # Should each extension invent its own tag type? Should there
700 # be one tagtype for all such "virtual" tags? Or is the status
703 # be one tagtype for all such "virtual" tags? Or is the status
701 # quo fine?
704 # quo fine?
702
705
703
706
704 # map tag name to (node, hist)
707 # map tag name to (node, hist)
705 alltags = tagsmod.findglobaltags(self.ui, self)
708 alltags = tagsmod.findglobaltags(self.ui, self)
706 # map tag name to tag type
709 # map tag name to tag type
707 tagtypes = dict((tag, 'global') for tag in alltags)
710 tagtypes = dict((tag, 'global') for tag in alltags)
708
711
709 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
712 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
710
713
711 # Build the return dicts. Have to re-encode tag names because
714 # Build the return dicts. Have to re-encode tag names because
712 # the tags module always uses UTF-8 (in order not to lose info
715 # the tags module always uses UTF-8 (in order not to lose info
713 # writing to the cache), but the rest of Mercurial wants them in
716 # writing to the cache), but the rest of Mercurial wants them in
714 # local encoding.
717 # local encoding.
715 tags = {}
718 tags = {}
716 for (name, (node, hist)) in alltags.iteritems():
719 for (name, (node, hist)) in alltags.iteritems():
717 if node != nullid:
720 if node != nullid:
718 tags[encoding.tolocal(name)] = node
721 tags[encoding.tolocal(name)] = node
719 tags['tip'] = self.changelog.tip()
722 tags['tip'] = self.changelog.tip()
720 tagtypes = dict([(encoding.tolocal(name), value)
723 tagtypes = dict([(encoding.tolocal(name), value)
721 for (name, value) in tagtypes.iteritems()])
724 for (name, value) in tagtypes.iteritems()])
722 return (tags, tagtypes)
725 return (tags, tagtypes)
723
726
724 def tagtype(self, tagname):
727 def tagtype(self, tagname):
725 '''
728 '''
726 return the type of the given tag. result can be:
729 return the type of the given tag. result can be:
727
730
728 'local' : a local tag
731 'local' : a local tag
729 'global' : a global tag
732 'global' : a global tag
730 None : tag does not exist
733 None : tag does not exist
731 '''
734 '''
732
735
733 return self._tagscache.tagtypes.get(tagname)
736 return self._tagscache.tagtypes.get(tagname)
734
737
735 def tagslist(self):
738 def tagslist(self):
736 '''return a list of tags ordered by revision'''
739 '''return a list of tags ordered by revision'''
737 if not self._tagscache.tagslist:
740 if not self._tagscache.tagslist:
738 l = []
741 l = []
739 for t, n in self.tags().iteritems():
742 for t, n in self.tags().iteritems():
740 l.append((self.changelog.rev(n), t, n))
743 l.append((self.changelog.rev(n), t, n))
741 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
744 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
742
745
743 return self._tagscache.tagslist
746 return self._tagscache.tagslist
744
747
745 def nodetags(self, node):
748 def nodetags(self, node):
746 '''return the tags associated with a node'''
749 '''return the tags associated with a node'''
747 if not self._tagscache.nodetagscache:
750 if not self._tagscache.nodetagscache:
748 nodetagscache = {}
751 nodetagscache = {}
749 for t, n in self._tagscache.tags.iteritems():
752 for t, n in self._tagscache.tags.iteritems():
750 nodetagscache.setdefault(n, []).append(t)
753 nodetagscache.setdefault(n, []).append(t)
751 for tags in nodetagscache.itervalues():
754 for tags in nodetagscache.itervalues():
752 tags.sort()
755 tags.sort()
753 self._tagscache.nodetagscache = nodetagscache
756 self._tagscache.nodetagscache = nodetagscache
754 return self._tagscache.nodetagscache.get(node, [])
757 return self._tagscache.nodetagscache.get(node, [])
755
758
756 def nodebookmarks(self, node):
759 def nodebookmarks(self, node):
757 """return the list of bookmarks pointing to the specified node"""
760 """return the list of bookmarks pointing to the specified node"""
758 marks = []
761 marks = []
759 for bookmark, n in self._bookmarks.iteritems():
762 for bookmark, n in self._bookmarks.iteritems():
760 if n == node:
763 if n == node:
761 marks.append(bookmark)
764 marks.append(bookmark)
762 return sorted(marks)
765 return sorted(marks)
763
766
764 def branchmap(self):
767 def branchmap(self):
765 '''returns a dictionary {branch: [branchheads]} with branchheads
768 '''returns a dictionary {branch: [branchheads]} with branchheads
766 ordered by increasing revision number'''
769 ordered by increasing revision number'''
767 branchmap.updatecache(self)
770 branchmap.updatecache(self)
768 return self._branchcaches[self.filtername]
771 return self._branchcaches[self.filtername]
769
772
770 @unfilteredmethod
773 @unfilteredmethod
771 def revbranchcache(self):
774 def revbranchcache(self):
772 if not self._revbranchcache:
775 if not self._revbranchcache:
773 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
776 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
774 return self._revbranchcache
777 return self._revbranchcache
775
778
776 def branchtip(self, branch, ignoremissing=False):
779 def branchtip(self, branch, ignoremissing=False):
777 '''return the tip node for a given branch
780 '''return the tip node for a given branch
778
781
779 If ignoremissing is True, then this method will not raise an error.
782 If ignoremissing is True, then this method will not raise an error.
780 This is helpful for callers that only expect None for a missing branch
783 This is helpful for callers that only expect None for a missing branch
781 (e.g. namespace).
784 (e.g. namespace).
782
785
783 '''
786 '''
784 try:
787 try:
785 return self.branchmap().branchtip(branch)
788 return self.branchmap().branchtip(branch)
786 except KeyError:
789 except KeyError:
787 if not ignoremissing:
790 if not ignoremissing:
788 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
791 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
789 else:
792 else:
790 pass
793 pass
791
794
792 def lookup(self, key):
795 def lookup(self, key):
793 return self[key].node()
796 return self[key].node()
794
797
795 def lookupbranch(self, key, remote=None):
798 def lookupbranch(self, key, remote=None):
796 repo = remote or self
799 repo = remote or self
797 if key in repo.branchmap():
800 if key in repo.branchmap():
798 return key
801 return key
799
802
800 repo = (remote and remote.local()) and remote or self
803 repo = (remote and remote.local()) and remote or self
801 return repo[key].branch()
804 return repo[key].branch()
802
805
803 def known(self, nodes):
806 def known(self, nodes):
804 cl = self.changelog
807 cl = self.changelog
805 nm = cl.nodemap
808 nm = cl.nodemap
806 filtered = cl.filteredrevs
809 filtered = cl.filteredrevs
807 result = []
810 result = []
808 for n in nodes:
811 for n in nodes:
809 r = nm.get(n)
812 r = nm.get(n)
810 resp = not (r is None or r in filtered)
813 resp = not (r is None or r in filtered)
811 result.append(resp)
814 result.append(resp)
812 return result
815 return result
813
816
814 def local(self):
817 def local(self):
815 return self
818 return self
816
819
817 def publishing(self):
820 def publishing(self):
818 # it's safe (and desirable) to trust the publish flag unconditionally
821 # it's safe (and desirable) to trust the publish flag unconditionally
819 # so that we don't finalize changes shared between users via ssh or nfs
822 # so that we don't finalize changes shared between users via ssh or nfs
820 return self.ui.configbool('phases', 'publish', True, untrusted=True)
823 return self.ui.configbool('phases', 'publish', True, untrusted=True)
821
824
822 def cancopy(self):
825 def cancopy(self):
823 # so statichttprepo's override of local() works
826 # so statichttprepo's override of local() works
824 if not self.local():
827 if not self.local():
825 return False
828 return False
826 if not self.publishing():
829 if not self.publishing():
827 return True
830 return True
828 # if publishing we can't copy if there is filtered content
831 # if publishing we can't copy if there is filtered content
829 return not self.filtered('visible').changelog.filteredrevs
832 return not self.filtered('visible').changelog.filteredrevs
830
833
831 def shared(self):
834 def shared(self):
832 '''the type of shared repository (None if not shared)'''
835 '''the type of shared repository (None if not shared)'''
833 if self.sharedpath != self.path:
836 if self.sharedpath != self.path:
834 return 'store'
837 return 'store'
835 return None
838 return None
836
839
837 def wjoin(self, f, *insidef):
840 def wjoin(self, f, *insidef):
838 return self.vfs.reljoin(self.root, f, *insidef)
841 return self.vfs.reljoin(self.root, f, *insidef)
839
842
840 def file(self, f):
843 def file(self, f):
841 if f[0] == '/':
844 if f[0] == '/':
842 f = f[1:]
845 f = f[1:]
843 return filelog.filelog(self.svfs, f)
846 return filelog.filelog(self.svfs, f)
844
847
845 def changectx(self, changeid):
848 def changectx(self, changeid):
846 return self[changeid]
849 return self[changeid]
847
850
848 def setparents(self, p1, p2=nullid):
851 def setparents(self, p1, p2=nullid):
849 with self.dirstate.parentchange():
852 with self.dirstate.parentchange():
850 copies = self.dirstate.setparents(p1, p2)
853 copies = self.dirstate.setparents(p1, p2)
851 pctx = self[p1]
854 pctx = self[p1]
852 if copies:
855 if copies:
853 # Adjust copy records, the dirstate cannot do it, it
856 # Adjust copy records, the dirstate cannot do it, it
854 # requires access to parents manifests. Preserve them
857 # requires access to parents manifests. Preserve them
855 # only for entries added to first parent.
858 # only for entries added to first parent.
856 for f in copies:
859 for f in copies:
857 if f not in pctx and copies[f] in pctx:
860 if f not in pctx and copies[f] in pctx:
858 self.dirstate.copy(copies[f], f)
861 self.dirstate.copy(copies[f], f)
859 if p2 == nullid:
862 if p2 == nullid:
860 for f, s in sorted(self.dirstate.copies().items()):
863 for f, s in sorted(self.dirstate.copies().items()):
861 if f not in pctx and s not in pctx:
864 if f not in pctx and s not in pctx:
862 self.dirstate.copy(None, f)
865 self.dirstate.copy(None, f)
863
866
864 def filectx(self, path, changeid=None, fileid=None):
867 def filectx(self, path, changeid=None, fileid=None):
865 """changeid can be a changeset revision, node, or tag.
868 """changeid can be a changeset revision, node, or tag.
866 fileid can be a file revision or node."""
869 fileid can be a file revision or node."""
867 return context.filectx(self, path, changeid, fileid)
870 return context.filectx(self, path, changeid, fileid)
868
871
869 def getcwd(self):
872 def getcwd(self):
870 return self.dirstate.getcwd()
873 return self.dirstate.getcwd()
871
874
872 def pathto(self, f, cwd=None):
875 def pathto(self, f, cwd=None):
873 return self.dirstate.pathto(f, cwd)
876 return self.dirstate.pathto(f, cwd)
874
877
875 def _loadfilter(self, filter):
878 def _loadfilter(self, filter):
876 if filter not in self.filterpats:
879 if filter not in self.filterpats:
877 l = []
880 l = []
878 for pat, cmd in self.ui.configitems(filter):
881 for pat, cmd in self.ui.configitems(filter):
879 if cmd == '!':
882 if cmd == '!':
880 continue
883 continue
881 mf = matchmod.match(self.root, '', [pat])
884 mf = matchmod.match(self.root, '', [pat])
882 fn = None
885 fn = None
883 params = cmd
886 params = cmd
884 for name, filterfn in self._datafilters.iteritems():
887 for name, filterfn in self._datafilters.iteritems():
885 if cmd.startswith(name):
888 if cmd.startswith(name):
886 fn = filterfn
889 fn = filterfn
887 params = cmd[len(name):].lstrip()
890 params = cmd[len(name):].lstrip()
888 break
891 break
889 if not fn:
892 if not fn:
890 fn = lambda s, c, **kwargs: util.filter(s, c)
893 fn = lambda s, c, **kwargs: util.filter(s, c)
891 # Wrap old filters not supporting keyword arguments
894 # Wrap old filters not supporting keyword arguments
892 if not inspect.getargspec(fn)[2]:
895 if not inspect.getargspec(fn)[2]:
893 oldfn = fn
896 oldfn = fn
894 fn = lambda s, c, **kwargs: oldfn(s, c)
897 fn = lambda s, c, **kwargs: oldfn(s, c)
895 l.append((mf, fn, params))
898 l.append((mf, fn, params))
896 self.filterpats[filter] = l
899 self.filterpats[filter] = l
897 return self.filterpats[filter]
900 return self.filterpats[filter]
898
901
899 def _filter(self, filterpats, filename, data):
902 def _filter(self, filterpats, filename, data):
900 for mf, fn, cmd in filterpats:
903 for mf, fn, cmd in filterpats:
901 if mf(filename):
904 if mf(filename):
902 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
905 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
903 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
906 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
904 break
907 break
905
908
906 return data
909 return data
907
910
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

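    # Sketch of how an extension might plug into the filter machinery above
    # (the 'normalizeeol:' name, the reposetup body and the [encode] sample
    # are assumptions, not an established API). Registered filters are
    # matched by command prefix in _loadfilter() and invoked by _filter() as
    # fn(data, params, ui=..., repo=..., filename=...).
    #
    #   def normalizeeol(data, params, ui=None, repo=None, filename=None,
    #                    **kwargs):
    #       return data.replace('\r\n', '\n')
    #
    #   def reposetup(ui, repo):
    #       repo.adddatafilter('normalizeeol:', normalizeeol)
    #
    #   # hgrc: [encode]
    #   #       **.txt = normalizeeol:
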
919 def wread(self, filename):
922 def wread(self, filename):
920 if self.wvfs.islink(filename):
923 if self.wvfs.islink(filename):
921 data = self.wvfs.readlink(filename)
924 data = self.wvfs.readlink(filename)
922 else:
925 else:
923 data = self.wvfs.read(filename)
926 data = self.wvfs.read(filename)
924 return self._filter(self._encodefilterpats, filename, data)
927 return self._filter(self._encodefilterpats, filename, data)
925
928
926 def wwrite(self, filename, data, flags, backgroundclose=False):
929 def wwrite(self, filename, data, flags, backgroundclose=False):
927 """write ``data`` into ``filename`` in the working directory
930 """write ``data`` into ``filename`` in the working directory
928
931
929 This returns length of written (maybe decoded) data.
932 This returns length of written (maybe decoded) data.
930 """
933 """
931 data = self._filter(self._decodefilterpats, filename, data)
934 data = self._filter(self._decodefilterpats, filename, data)
932 if 'l' in flags:
935 if 'l' in flags:
933 self.wvfs.symlink(data, filename)
936 self.wvfs.symlink(data, filename)
934 else:
937 else:
935 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
938 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
936 if 'x' in flags:
939 if 'x' in flags:
937 self.wvfs.setflags(filename, False, True)
940 self.wvfs.setflags(filename, False, True)
938 return len(data)
941 return len(data)
939
942
940 def wwritedata(self, filename, data):
943 def wwritedata(self, filename, data):
941 return self._filter(self._decodefilterpats, filename, data)
944 return self._filter(self._decodefilterpats, filename, data)
942
945
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

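    # Illustrative sketch (assumption): callers can use currenttransaction()
    # to detect an already-open transaction, e.g. to piggyback extra hook
    # arguments on it instead of opening a nested one. 'repo' and 'my_flag'
    # are made-up names.
    #
    #   tr = repo.currenttransaction()
    #   if tr is not None:
    #       tr.hookargs['my_flag'] = '1'
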
    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with a performance impact. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
                                             False)
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here; as we only do this once,
                # building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file with the
                        # transaction because we need it to still exist once
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
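        # Illustrative sketch of a consumer for the file written above: an
        # in-process 'txnclose' hook (the hook name 'tagaudit' and the exact
        # keyword arguments passed to Python hooks are assumptions).
        #
        #   def tagaudit(ui, repo, **kwargs):
        #       if kwargs.get('tag_moved') != '1':
        #           return
        #       with repo.vfs('changes/tags.changes') as fp:
        #           for line in fp:
        #               action, node, name = line.rstrip('\n').split(' ', 2)
        #               ui.status('tag %s: %s %s\n' % (action, name, node))
        #
        # enabled with:  [hooks]  txnclose.tagaudit = python:myext.tagaudit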
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
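        # Illustrative sketch (assumption, not part of this method): a Python
        # 'pretxnclose' hook receives these hook arguments as keyword
        # arguments and can veto the transaction by raising error.Abort. The
        # 'blocklargetxn' name and the threshold are made up.
        #
        #   def blocklargetxn(ui, repo, txnname=None, **kwargs):
        #       if len(repo) > 1000000:
        #           raise error.Abort('repository has grown past the limit')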
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this must be invoked explicitly here because in-memory
                # changes are not written out when the transaction closes
                # if tr.addfilegenerator (via dirstate.write or so) was not
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)
        tr.changes['revs'] = set()

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when hooks run. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if the transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation; in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

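    # Illustrative sketch of the canonical calling pattern for transaction()
    # (the 'my-change' description is an assumption; this mirrors how
    # commit() below uses it): take wlock, then lock, open the transaction,
    # and release everything in reverse order.
    #
    #   wlock = lock = tr = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       tr = repo.transaction('my-change')
    #       # ... write to the store through tr ...
    #       tr.close()
    #   finally:
    #       lockmod.release(tr, lock, wlock)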
1116 def _journalfiles(self):
1119 def _journalfiles(self):
1117 return ((self.svfs, 'journal'),
1120 return ((self.svfs, 'journal'),
1118 (self.vfs, 'journal.dirstate'),
1121 (self.vfs, 'journal.dirstate'),
1119 (self.vfs, 'journal.branch'),
1122 (self.vfs, 'journal.branch'),
1120 (self.vfs, 'journal.desc'),
1123 (self.vfs, 'journal.desc'),
1121 (self.vfs, 'journal.bookmarks'),
1124 (self.vfs, 'journal.bookmarks'),
1122 (self.svfs, 'journal.phaseroots'))
1125 (self.svfs, 'journal.phaseroots'))
1123
1126
1124 def undofiles(self):
1127 def undofiles(self):
1125 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1128 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1126
1129
1127 @unfilteredmethod
1130 @unfilteredmethod
1128 def _writejournal(self, desc):
1131 def _writejournal(self, desc):
1129 self.dirstate.savebackup(None, prefix='journal.')
1132 self.dirstate.savebackup(None, prefix='journal.')
1130 self.vfs.write("journal.branch",
1133 self.vfs.write("journal.branch",
1131 encoding.fromlocal(self.dirstate.branch()))
1134 encoding.fromlocal(self.dirstate.branch()))
1132 self.vfs.write("journal.desc",
1135 self.vfs.write("journal.desc",
1133 "%d\n%s\n" % (len(self), desc))
1136 "%d\n%s\n" % (len(self), desc))
1134 self.vfs.write("journal.bookmarks",
1137 self.vfs.write("journal.bookmarks",
1135 self.vfs.tryread("bookmarks"))
1138 self.vfs.tryread("bookmarks"))
1136 self.svfs.write("journal.phaseroots",
1139 self.svfs.write("journal.phaseroots",
1137 self.svfs.tryread("phaseroots"))
1140 self.svfs.tryread("phaseroots"))
1138
1141
1139 def recover(self):
1142 def recover(self):
1140 with self.lock():
1143 with self.lock():
1141 if self.svfs.exists("journal"):
1144 if self.svfs.exists("journal"):
1142 self.ui.status(_("rolling back interrupted transaction\n"))
1145 self.ui.status(_("rolling back interrupted transaction\n"))
1143 vfsmap = {'': self.svfs,
1146 vfsmap = {'': self.svfs,
1144 'plain': self.vfs,}
1147 'plain': self.vfs,}
1145 transaction.rollback(self.svfs, vfsmap, "journal",
1148 transaction.rollback(self.svfs, vfsmap, "journal",
1146 self.ui.warn)
1149 self.ui.warn)
1147 self.invalidate()
1150 self.invalidate()
1148 return True
1151 return True
1149 else:
1152 else:
1150 self.ui.warn(_("no interrupted transaction available\n"))
1153 self.ui.warn(_("no interrupted transaction available\n"))
1151 return False
1154 return False
1152
1155
1153 def rollback(self, dryrun=False, force=False):
1156 def rollback(self, dryrun=False, force=False):
1154 wlock = lock = dsguard = None
1157 wlock = lock = dsguard = None
1155 try:
1158 try:
1156 wlock = self.wlock()
1159 wlock = self.wlock()
1157 lock = self.lock()
1160 lock = self.lock()
1158 if self.svfs.exists("undo"):
1161 if self.svfs.exists("undo"):
1159 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1162 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1160
1163
1161 return self._rollback(dryrun, force, dsguard)
1164 return self._rollback(dryrun, force, dsguard)
1162 else:
1165 else:
1163 self.ui.warn(_("no rollback information available\n"))
1166 self.ui.warn(_("no rollback information available\n"))
1164 return 1
1167 return 1
1165 finally:
1168 finally:
1166 release(dsguard, lock, wlock)
1169 release(dsguard, lock, wlock)
1167
1170
1168 @unfilteredmethod # Until we get smarter cache management
1171 @unfilteredmethod # Until we get smarter cache management
1169 def _rollback(self, dryrun, force, dsguard):
1172 def _rollback(self, dryrun, force, dsguard):
1170 ui = self.ui
1173 ui = self.ui
1171 try:
1174 try:
1172 args = self.vfs.read('undo.desc').splitlines()
1175 args = self.vfs.read('undo.desc').splitlines()
1173 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1176 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1174 if len(args) >= 3:
1177 if len(args) >= 3:
1175 detail = args[2]
1178 detail = args[2]
1176 oldtip = oldlen - 1
1179 oldtip = oldlen - 1
1177
1180
1178 if detail and ui.verbose:
1181 if detail and ui.verbose:
1179 msg = (_('repository tip rolled back to revision %d'
1182 msg = (_('repository tip rolled back to revision %d'
1180 ' (undo %s: %s)\n')
1183 ' (undo %s: %s)\n')
1181 % (oldtip, desc, detail))
1184 % (oldtip, desc, detail))
1182 else:
1185 else:
1183 msg = (_('repository tip rolled back to revision %d'
1186 msg = (_('repository tip rolled back to revision %d'
1184 ' (undo %s)\n')
1187 ' (undo %s)\n')
1185 % (oldtip, desc))
1188 % (oldtip, desc))
1186 except IOError:
1189 except IOError:
1187 msg = _('rolling back unknown transaction\n')
1190 msg = _('rolling back unknown transaction\n')
1188 desc = None
1191 desc = None
1189
1192
1190 if not force and self['.'] != self['tip'] and desc == 'commit':
1193 if not force and self['.'] != self['tip'] and desc == 'commit':
1191 raise error.Abort(
1194 raise error.Abort(
1192 _('rollback of last commit while not checked out '
1195 _('rollback of last commit while not checked out '
1193 'may lose data'), hint=_('use -f to force'))
1196 'may lose data'), hint=_('use -f to force'))
1194
1197
1195 ui.status(msg)
1198 ui.status(msg)
1196 if dryrun:
1199 if dryrun:
1197 return 0
1200 return 0
1198
1201
1199 parents = self.dirstate.parents()
1202 parents = self.dirstate.parents()
1200 self.destroying()
1203 self.destroying()
1201 vfsmap = {'plain': self.vfs, '': self.svfs}
1204 vfsmap = {'plain': self.vfs, '': self.svfs}
1202 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1205 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1203 if self.vfs.exists('undo.bookmarks'):
1206 if self.vfs.exists('undo.bookmarks'):
1204 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1207 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1205 if self.svfs.exists('undo.phaseroots'):
1208 if self.svfs.exists('undo.phaseroots'):
1206 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1209 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1207 self.invalidate()
1210 self.invalidate()
1208
1211
1209 parentgone = (parents[0] not in self.changelog.nodemap or
1212 parentgone = (parents[0] not in self.changelog.nodemap or
1210 parents[1] not in self.changelog.nodemap)
1213 parents[1] not in self.changelog.nodemap)
1211 if parentgone:
1214 if parentgone:
1212 # prevent dirstateguard from overwriting already restored one
1215 # prevent dirstateguard from overwriting already restored one
1213 dsguard.close()
1216 dsguard.close()
1214
1217
1215 self.dirstate.restorebackup(None, prefix='undo.')
1218 self.dirstate.restorebackup(None, prefix='undo.')
1216 try:
1219 try:
1217 branch = self.vfs.read('undo.branch')
1220 branch = self.vfs.read('undo.branch')
1218 self.dirstate.setbranch(encoding.tolocal(branch))
1221 self.dirstate.setbranch(encoding.tolocal(branch))
1219 except IOError:
1222 except IOError:
1220 ui.warn(_('named branch could not be reset: '
1223 ui.warn(_('named branch could not be reset: '
1221 'current branch is still \'%s\'\n')
1224 'current branch is still \'%s\'\n')
1222 % self.dirstate.branch())
1225 % self.dirstate.branch())
1223
1226
1224 parents = tuple([p.rev() for p in self[None].parents()])
1227 parents = tuple([p.rev() for p in self[None].parents()])
1225 if len(parents) > 1:
1228 if len(parents) > 1:
1226 ui.status(_('working directory now based on '
1229 ui.status(_('working directory now based on '
1227 'revisions %d and %d\n') % parents)
1230 'revisions %d and %d\n') % parents)
1228 else:
1231 else:
1229 ui.status(_('working directory now based on '
1232 ui.status(_('working directory now based on '
1230 'revision %d\n') % parents)
1233 'revision %d\n') % parents)
1231 mergemod.mergestate.clean(self, self['.'].node())
1234 mergemod.mergestate.clean(self, self['.'].node())
1232
1235
1233 # TODO: if we know which new heads may result from this rollback, pass
1236 # TODO: if we know which new heads may result from this rollback, pass
1234 # them to destroy(), which will prevent the branchhead cache from being
1237 # them to destroy(), which will prevent the branchhead cache from being
1235 # invalidated.
1238 # invalidated.
1236 self.destroyed()
1239 self.destroyed()
1237 return 0
1240 return 0
1238
1241
    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the newly created transaction is passed
        to the method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

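    # Sketch of how an extension might augment this cache-warming callback
    # (wrapping the method with extensions.wrapfunction and the
    # 'warmextracache' helper are assumptions, not an established API):
    #
    #   from mercurial import extensions, localrepo
    #
    #   def wrapcacheupdater(orig, self, newtransaction):
    #       updater = orig(self, newtransaction)
    #       def wrapped(tr):
    #           updater(tr)
    #           warmextracache(self, tr)  # hypothetical extra cache
    #       return wrapped
    #
    #   def uisetup(ui):
    #       extensions.wrapfunction(localrepo.localrepository,
    #                               '_buildcacheupdater', wrapcacheupdater)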
    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction has closed, that
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

1271 def invalidatecaches(self):
1274 def invalidatecaches(self):
1272
1275
1273 if '_tagscache' in vars(self):
1276 if '_tagscache' in vars(self):
1274 # can't use delattr on proxy
1277 # can't use delattr on proxy
1275 del self.__dict__['_tagscache']
1278 del self.__dict__['_tagscache']
1276
1279
1277 self.unfiltered()._branchcaches.clear()
1280 self.unfiltered()._branchcaches.clear()
1278 self.invalidatevolatilesets()
1281 self.invalidatevolatilesets()
1279
1282
1280 def invalidatevolatilesets(self):
1283 def invalidatevolatilesets(self):
1281 self.filteredrevcache.clear()
1284 self.filteredrevcache.clear()
1282 obsolete.clearobscaches(self)
1285 obsolete.clearobscaches(self)
1283
1286
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

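    # Illustrative usage sketch (assumption): after something outside this
    # repository object has rewritten .hg/dirstate, invalidating it makes the
    # next dirstate access reread the file instead of using stale data.
    #
    #   repo.invalidatedirstate()
    #   p1, p2 = repo.dirstate.parents()  # triggers a fresh read if needed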
1301 def invalidate(self, clearfilecache=False):
1304 def invalidate(self, clearfilecache=False):
1302 '''Invalidates both store and non-store parts other than dirstate
1305 '''Invalidates both store and non-store parts other than dirstate
1303
1306
1304 If a transaction is running, invalidation of store is omitted,
1307 If a transaction is running, invalidation of store is omitted,
1305 because discarding in-memory changes might cause inconsistency
1308 because discarding in-memory changes might cause inconsistency
1306 (e.g. incomplete fncache causes unintentional failure, but
1309 (e.g. incomplete fncache causes unintentional failure, but
1307 redundant one doesn't).
1310 redundant one doesn't).
1308 '''
1311 '''
1309 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1312 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1310 for k in list(self._filecache.keys()):
1313 for k in list(self._filecache.keys()):
1311 # dirstate is invalidated separately in invalidatedirstate()
1314 # dirstate is invalidated separately in invalidatedirstate()
1312 if k == 'dirstate':
1315 if k == 'dirstate':
1313 continue
1316 continue
1314
1317
1315 if clearfilecache:
1318 if clearfilecache:
1316 del self._filecache[k]
1319 del self._filecache[k]
1317 try:
1320 try:
1318 delattr(unfiltered, k)
1321 delattr(unfiltered, k)
1319 except AttributeError:
1322 except AttributeError:
1320 pass
1323 pass
1321 self.invalidatecaches()
1324 self.invalidatecaches()
1322 if not self.currenttransaction():
1325 if not self.currenttransaction():
1323 # TODO: Changing contents of store outside transaction
1326 # TODO: Changing contents of store outside transaction
1324 # causes inconsistency. We should make in-memory store
1327 # causes inconsistency. We should make in-memory store
1325 # changes detectable, and abort if changed.
1328 # changes detectable, and abort if changed.
1326 self.store.invalidatecaches()
1329 self.store.invalidatecaches()
1327
1330
1328 def invalidateall(self):
1331 def invalidateall(self):
1329 '''Fully invalidates both store and non-store parts, causing the
1332 '''Fully invalidates both store and non-store parts, causing the
1330 subsequent operation to reread any outside changes.'''
1333 subsequent operation to reread any outside changes.'''
1331 # extension should hook this to invalidate its caches
1334 # extension should hook this to invalidate its caches
1332 self.invalidate()
1335 self.invalidate()
1333 self.invalidatedirstate()
1336 self.invalidatedirstate()
1334
1337
1335 @unfilteredmethod
1338 @unfilteredmethod
1336 def _refreshfilecachestats(self, tr):
1339 def _refreshfilecachestats(self, tr):
1337 """Reload stats of cached files so that they are flagged as valid"""
1340 """Reload stats of cached files so that they are flagged as valid"""
1338 for k, ce in self._filecache.items():
1341 for k, ce in self._filecache.items():
1339 if k == 'dirstate' or k not in self.__dict__:
1342 if k == 'dirstate' or k not in self.__dict__:
1340 continue
1343 continue
1341 ce.refresh()
1344 ce.refresh()
1342
1345
1343 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1346 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1344 inheritchecker=None, parentenvvar=None):
1347 inheritchecker=None, parentenvvar=None):
1345 parentlock = None
1348 parentlock = None
1346 # the contents of parentenvvar are used by the underlying lock to
1349 # the contents of parentenvvar are used by the underlying lock to
1347 # determine whether it can be inherited
1350 # determine whether it can be inherited
1348 if parentenvvar is not None:
1351 if parentenvvar is not None:
1349 parentlock = encoding.environ.get(parentenvvar)
1352 parentlock = encoding.environ.get(parentenvvar)
1350 try:
1353 try:
1351 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1354 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1352 acquirefn=acquirefn, desc=desc,
1355 acquirefn=acquirefn, desc=desc,
1353 inheritchecker=inheritchecker,
1356 inheritchecker=inheritchecker,
1354 parentlock=parentlock)
1357 parentlock=parentlock)
1355 except error.LockHeld as inst:
1358 except error.LockHeld as inst:
1356 if not wait:
1359 if not wait:
1357 raise
1360 raise
1358 # show more details for new-style locks
1361 # show more details for new-style locks
1359 if ':' in inst.locker:
1362 if ':' in inst.locker:
1360 host, pid = inst.locker.split(":", 1)
1363 host, pid = inst.locker.split(":", 1)
1361 self.ui.warn(
1364 self.ui.warn(
1362 _("waiting for lock on %s held by process %r "
1365 _("waiting for lock on %s held by process %r "
1363 "on host %r\n") % (desc, pid, host))
1366 "on host %r\n") % (desc, pid, host))
1364 else:
1367 else:
1365 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1368 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1366 (desc, inst.locker))
1369 (desc, inst.locker))
1367 # default to 600 seconds timeout
1370 # default to 600 seconds timeout
1368 l = lockmod.lock(vfs, lockname,
1371 l = lockmod.lock(vfs, lockname,
1369 int(self.ui.config("ui", "timeout", "600")),
1372 int(self.ui.config("ui", "timeout", "600")),
1370 releasefn=releasefn, acquirefn=acquirefn,
1373 releasefn=releasefn, acquirefn=acquirefn,
1371 desc=desc)
1374 desc=desc)
1372 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1375 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1373 return l
1376 return l
1374
1377
    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

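    # Illustrative sketch (this mirrors how commit() below defers its
    # 'commit' hook; the 'notify' callback is an assumption): callbacks
    # queued here run once the outermost lock is released, or immediately
    # when no lock is held.
    #
    #   def notify():
    #       repo.ui.status('all locks released\n')
    #   repo._afterlock(notify)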
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a deadlock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

1405 def _wlockchecktransaction(self):
1408 def _wlockchecktransaction(self):
1406 if self.currenttransaction() is not None:
1409 if self.currenttransaction() is not None:
1407 raise error.LockInheritanceContractViolation(
1410 raise error.LockInheritanceContractViolation(
1408 'wlock cannot be inherited in the middle of a transaction')
1411 'wlock cannot be inherited in the middle of a transaction')
1409
1412
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a deadlock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a deadlock; it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

1447 """Returns the lock if it's held, or None if it's not."""
1450 """Returns the lock if it's held, or None if it's not."""
1448 if lockref is None:
1451 if lockref is None:
1449 return None
1452 return None
1450 l = lockref()
1453 l = lockref()
1451 if l is None or not l.held:
1454 if l is None or not l.held:
1452 return None
1455 return None
1453 return l
1456 return l
1454
1457
1455 def currentwlock(self):
1458 def currentwlock(self):
1456 """Returns the wlock if it's held, or None if it's not."""
1459 """Returns the wlock if it's held, or None if it's not."""
1457 return self._currentlock(self._wlockref)
1460 return self._currentlock(self._wlockref)
1458
1461
1459 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1462 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1460 """
1463 """
1461 commit an individual file as part of a larger transaction
1464 commit an individual file as part of a larger transaction
1462 """
1465 """
1463
1466
1464 fname = fctx.path()
1467 fname = fctx.path()
1465 fparent1 = manifest1.get(fname, nullid)
1468 fparent1 = manifest1.get(fname, nullid)
1466 fparent2 = manifest2.get(fname, nullid)
1469 fparent2 = manifest2.get(fname, nullid)
1467 if isinstance(fctx, context.filectx):
1470 if isinstance(fctx, context.filectx):
1468 node = fctx.filenode()
1471 node = fctx.filenode()
1469 if node in [fparent1, fparent2]:
1472 if node in [fparent1, fparent2]:
1470 self.ui.debug('reusing %s filelog entry\n' % fname)
1473 self.ui.debug('reusing %s filelog entry\n' % fname)
1471 if manifest1.flags(fname) != fctx.flags():
1474 if manifest1.flags(fname) != fctx.flags():
1472 changelist.append(fname)
1475 changelist.append(fname)
1473 return node
1476 return node
1474
1477
1475 flog = self.file(fname)
1478 flog = self.file(fname)
1476 meta = {}
1479 meta = {}
1477 copy = fctx.renamed()
1480 copy = fctx.renamed()
1478 if copy and copy[0] != fname:
1481 if copy and copy[0] != fname:
1479 # Mark the new revision of this file as a copy of another
1482 # Mark the new revision of this file as a copy of another
1480 # file. This copy data will effectively act as a parent
1483 # file. This copy data will effectively act as a parent
1481 # of this new revision. If this is a merge, the first
1484 # of this new revision. If this is a merge, the first
1482 # parent will be the nullid (meaning "look up the copy data")
1485 # parent will be the nullid (meaning "look up the copy data")
1483 # and the second one will be the other parent. For example:
1486 # and the second one will be the other parent. For example:
1484 #
1487 #
1485 # 0 --- 1 --- 3 rev1 changes file foo
1488 # 0 --- 1 --- 3 rev1 changes file foo
1486 # \ / rev2 renames foo to bar and changes it
1489 # \ / rev2 renames foo to bar and changes it
1487 # \- 2 -/ rev3 should have bar with all changes and
1490 # \- 2 -/ rev3 should have bar with all changes and
1488 # should record that bar descends from
1491 # should record that bar descends from
1489 # bar in rev2 and foo in rev1
1492 # bar in rev2 and foo in rev1
1490 #
1493 #
1491 # this allows this merge to succeed:
1494 # this allows this merge to succeed:
1492 #
1495 #
1493 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1496 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1494 # \ / merging rev3 and rev4 should use bar@rev2
1497 # \ / merging rev3 and rev4 should use bar@rev2
1495 # \- 2 --- 4 as the merge base
1498 # \- 2 --- 4 as the merge base
1496 #
1499 #
1497
1500
1498 cfname = copy[0]
1501 cfname = copy[0]
1499 crev = manifest1.get(cfname)
1502 crev = manifest1.get(cfname)
1500 newfparent = fparent2
1503 newfparent = fparent2
1501
1504
1502 if manifest2: # branch merge
1505 if manifest2: # branch merge
1503 if fparent2 == nullid or crev is None: # copied on remote side
1506 if fparent2 == nullid or crev is None: # copied on remote side
1504 if cfname in manifest2:
1507 if cfname in manifest2:
1505 crev = manifest2[cfname]
1508 crev = manifest2[cfname]
1506 newfparent = fparent1
1509 newfparent = fparent1
1507
1510
1508 # Here, we used to search backwards through history to try to find
1511 # Here, we used to search backwards through history to try to find
1509 # where the file copy came from if the source of a copy was not in
1512 # where the file copy came from if the source of a copy was not in
1510 # the parent directory. However, this doesn't actually make sense to
1513 # the parent directory. However, this doesn't actually make sense to
1511 # do (what does a copy from something not in your working copy even
1514 # do (what does a copy from something not in your working copy even
1512 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1515 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1513 # the user that copy information was dropped, so if they didn't
1516 # the user that copy information was dropped, so if they didn't
1514 # expect this outcome it can be fixed, but this is the correct
1517 # expect this outcome it can be fixed, but this is the correct
1515 # behavior in this circumstance.
1518 # behavior in this circumstance.
1516
1519
1517 if crev:
1520 if crev:
1518 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1521 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1519 meta["copy"] = cfname
1522 meta["copy"] = cfname
1520 meta["copyrev"] = hex(crev)
1523 meta["copyrev"] = hex(crev)
1521 fparent1, fparent2 = nullid, newfparent
1524 fparent1, fparent2 = nullid, newfparent
1522 else:
1525 else:
1523 self.ui.warn(_("warning: can't find ancestor for '%s' "
1526 self.ui.warn(_("warning: can't find ancestor for '%s' "
1524 "copied from '%s'!\n") % (fname, cfname))
1527 "copied from '%s'!\n") % (fname, cfname))
1525
1528
1526 elif fparent1 == nullid:
1529 elif fparent1 == nullid:
1527 fparent1, fparent2 = fparent2, nullid
1530 fparent1, fparent2 = fparent2, nullid
1528 elif fparent2 != nullid:
1531 elif fparent2 != nullid:
1529 # is one parent an ancestor of the other?
1532 # is one parent an ancestor of the other?
1530 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1533 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1531 if fparent1 in fparentancestors:
1534 if fparent1 in fparentancestors:
1532 fparent1, fparent2 = fparent2, nullid
1535 fparent1, fparent2 = fparent2, nullid
1533 elif fparent2 in fparentancestors:
1536 elif fparent2 in fparentancestors:
1534 fparent2 = nullid
1537 fparent2 = nullid
1535
1538
1536 # is the file changed?
1539 # is the file changed?
1537 text = fctx.data()
1540 text = fctx.data()
1538 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1541 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1539 changelist.append(fname)
1542 changelist.append(fname)
1540 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1543 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1541 # are just the flags changed during merge?
1544 # are just the flags changed during merge?
1542 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1545 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1543 changelist.append(fname)
1546 changelist.append(fname)
1544
1547
1545 return fparent1
1548 return fparent1
1546
1549
1547 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1550 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1548 """check for commit arguments that aren't committable"""
1551 """check for commit arguments that aren't committable"""
1549 if match.isexact() or match.prefix():
1552 if match.isexact() or match.prefix():
1550 matched = set(status.modified + status.added + status.removed)
1553 matched = set(status.modified + status.added + status.removed)
1551
1554
1552 for f in match.files():
1555 for f in match.files():
1553 f = self.dirstate.normalize(f)
1556 f = self.dirstate.normalize(f)
1554 if f == '.' or f in matched or f in wctx.substate:
1557 if f == '.' or f in matched or f in wctx.substate:
1555 continue
1558 continue
1556 if f in status.deleted:
1559 if f in status.deleted:
1557 fail(f, _('file not found!'))
1560 fail(f, _('file not found!'))
1558 if f in vdirs: # visited directory
1561 if f in vdirs: # visited directory
1559 d = f + '/'
1562 d = f + '/'
1560 for mf in matched:
1563 for mf in matched:
1561 if mf.startswith(d):
1564 if mf.startswith(d):
1562 break
1565 break
1563 else:
1566 else:
1564 fail(f, _("no match under directory!"))
1567 fail(f, _("no match under directory!"))
1565 elif f not in self.dirstate:
1568 elif f not in self.dirstate:
1566 fail(f, _("file not tracked!"))
1569 fail(f, _("file not tracked!"))
1567
1570
1568 @unfilteredmethod
1571 @unfilteredmethod
1569 def commit(self, text="", user=None, date=None, match=None, force=False,
1572 def commit(self, text="", user=None, date=None, match=None, force=False,
1570 editor=False, extra=None):
1573 editor=False, extra=None):
1571 """Add a new revision to current repository.
1574 """Add a new revision to current repository.
1572
1575
1573 Revision information is gathered from the working directory,
1576 Revision information is gathered from the working directory,
1574 match can be used to filter the committed files. If editor is
1577 match can be used to filter the committed files. If editor is
1575 supplied, it is called to get a commit message.
1578 supplied, it is called to get a commit message.
1576 """
1579 """
1577 if extra is None:
1580 if extra is None:
1578 extra = {}
1581 extra = {}
1579
1582
1580 def fail(f, msg):
1583 def fail(f, msg):
1581 raise error.Abort('%s: %s' % (f, msg))
1584 raise error.Abort('%s: %s' % (f, msg))
1582
1585
1583 if not match:
1586 if not match:
1584 match = matchmod.always(self.root, '')
1587 match = matchmod.always(self.root, '')
1585
1588
1586 if not force:
1589 if not force:
1587 vdirs = []
1590 vdirs = []
1588 match.explicitdir = vdirs.append
1591 match.explicitdir = vdirs.append
1589 match.bad = fail
1592 match.bad = fail
1590
1593
1591 wlock = lock = tr = None
1594 wlock = lock = tr = None
1592 try:
1595 try:
1593 wlock = self.wlock()
1596 wlock = self.wlock()
1594 lock = self.lock() # for recent changelog (see issue4368)
1597 lock = self.lock() # for recent changelog (see issue4368)
1595
1598
1596 wctx = self[None]
1599 wctx = self[None]
1597 merge = len(wctx.parents()) > 1
1600 merge = len(wctx.parents()) > 1
1598
1601
1599 if not force and merge and not match.always():
1602 if not force and merge and not match.always():
1600 raise error.Abort(_('cannot partially commit a merge '
1603 raise error.Abort(_('cannot partially commit a merge '
1601 '(do not specify files or patterns)'))
1604 '(do not specify files or patterns)'))
1602
1605
1603 status = self.status(match=match, clean=force)
1606 status = self.status(match=match, clean=force)
1604 if force:
1607 if force:
1605 status.modified.extend(status.clean) # mq may commit clean files
1608 status.modified.extend(status.clean) # mq may commit clean files
1606
1609
1607 # check subrepos
1610 # check subrepos
1608 subs = []
1611 subs = []
1609 commitsubs = set()
1612 commitsubs = set()
1610 newstate = wctx.substate.copy()
1613 newstate = wctx.substate.copy()
1611 # only manage subrepos and .hgsubstate if .hgsub is present
1614 # only manage subrepos and .hgsubstate if .hgsub is present
1612 if '.hgsub' in wctx:
1615 if '.hgsub' in wctx:
1613 # we'll decide whether to track this ourselves, thanks
1616 # we'll decide whether to track this ourselves, thanks
1614 for c in status.modified, status.added, status.removed:
1617 for c in status.modified, status.added, status.removed:
1615 if '.hgsubstate' in c:
1618 if '.hgsubstate' in c:
1616 c.remove('.hgsubstate')
1619 c.remove('.hgsubstate')
1617
1620
1618 # compare current state to last committed state
1621 # compare current state to last committed state
1619 # build new substate based on last committed state
1622 # build new substate based on last committed state
1620 oldstate = wctx.p1().substate
1623 oldstate = wctx.p1().substate
1621 for s in sorted(newstate.keys()):
1624 for s in sorted(newstate.keys()):
1622 if not match(s):
1625 if not match(s):
1623 # ignore working copy, use old state if present
1626 # ignore working copy, use old state if present
1624 if s in oldstate:
1627 if s in oldstate:
1625 newstate[s] = oldstate[s]
1628 newstate[s] = oldstate[s]
1626 continue
1629 continue
1627 if not force:
1630 if not force:
1628 raise error.Abort(
1631 raise error.Abort(
1629 _("commit with new subrepo %s excluded") % s)
1632 _("commit with new subrepo %s excluded") % s)
1630 dirtyreason = wctx.sub(s).dirtyreason(True)
1633 dirtyreason = wctx.sub(s).dirtyreason(True)
1631 if dirtyreason:
1634 if dirtyreason:
1632 if not self.ui.configbool('ui', 'commitsubrepos'):
1635 if not self.ui.configbool('ui', 'commitsubrepos'):
1633 raise error.Abort(dirtyreason,
1636 raise error.Abort(dirtyreason,
1634 hint=_("use --subrepos for recursive commit"))
1637 hint=_("use --subrepos for recursive commit"))
1635 subs.append(s)
1638 subs.append(s)
1636 commitsubs.add(s)
1639 commitsubs.add(s)
1637 else:
1640 else:
1638 bs = wctx.sub(s).basestate()
1641 bs = wctx.sub(s).basestate()
1639 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1642 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1640 if oldstate.get(s, (None, None, None))[1] != bs:
1643 if oldstate.get(s, (None, None, None))[1] != bs:
1641 subs.append(s)
1644 subs.append(s)
1642
1645
1643 # check for removed subrepos
1646 # check for removed subrepos
1644 for p in wctx.parents():
1647 for p in wctx.parents():
1645 r = [s for s in p.substate if s not in newstate]
1648 r = [s for s in p.substate if s not in newstate]
1646 subs += [s for s in r if match(s)]
1649 subs += [s for s in r if match(s)]
1647 if subs:
1650 if subs:
1648 if (not match('.hgsub') and
1651 if (not match('.hgsub') and
1649 '.hgsub' in (wctx.modified() + wctx.added())):
1652 '.hgsub' in (wctx.modified() + wctx.added())):
1650 raise error.Abort(
1653 raise error.Abort(
1651 _("can't commit subrepos without .hgsub"))
1654 _("can't commit subrepos without .hgsub"))
1652 status.modified.insert(0, '.hgsubstate')
1655 status.modified.insert(0, '.hgsubstate')
1653
1656
1654 elif '.hgsub' in status.removed:
1657 elif '.hgsub' in status.removed:
1655 # clean up .hgsubstate when .hgsub is removed
1658 # clean up .hgsubstate when .hgsub is removed
1656 if ('.hgsubstate' in wctx and
1659 if ('.hgsubstate' in wctx and
1657 '.hgsubstate' not in (status.modified + status.added +
1660 '.hgsubstate' not in (status.modified + status.added +
1658 status.removed)):
1661 status.removed)):
1659 status.removed.insert(0, '.hgsubstate')
1662 status.removed.insert(0, '.hgsubstate')
1660
1663
1661 # make sure all explicit patterns are matched
1664 # make sure all explicit patterns are matched
1662 if not force:
1665 if not force:
1663 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1666 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1664
1667
1665 cctx = context.workingcommitctx(self, status,
1668 cctx = context.workingcommitctx(self, status,
1666 text, user, date, extra)
1669 text, user, date, extra)
1667
1670
1668 # internal config: ui.allowemptycommit
1671 # internal config: ui.allowemptycommit
1669 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1672 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1670 or extra.get('close') or merge or cctx.files()
1673 or extra.get('close') or merge or cctx.files()
1671 or self.ui.configbool('ui', 'allowemptycommit'))
1674 or self.ui.configbool('ui', 'allowemptycommit'))
1672 if not allowemptycommit:
1675 if not allowemptycommit:
1673 return None
1676 return None
1674
1677
1675 if merge and cctx.deleted():
1678 if merge and cctx.deleted():
1676 raise error.Abort(_("cannot commit merge with missing files"))
1679 raise error.Abort(_("cannot commit merge with missing files"))
1677
1680
1678 ms = mergemod.mergestate.read(self)
1681 ms = mergemod.mergestate.read(self)
1679 mergeutil.checkunresolved(ms)
1682 mergeutil.checkunresolved(ms)
1680
1683
1681 if editor:
1684 if editor:
1682 cctx._text = editor(self, cctx, subs)
1685 cctx._text = editor(self, cctx, subs)
1683 edited = (text != cctx._text)
1686 edited = (text != cctx._text)
1684
1687
1685 # Save commit message in case this transaction gets rolled back
1688 # Save commit message in case this transaction gets rolled back
1686 # (e.g. by a pretxncommit hook). Leave the content alone on
1689 # (e.g. by a pretxncommit hook). Leave the content alone on
1687 # the assumption that the user will use the same editor again.
1690 # the assumption that the user will use the same editor again.
1688 msgfn = self.savecommitmessage(cctx._text)
1691 msgfn = self.savecommitmessage(cctx._text)
1689
1692
1690 # commit subs and write new state
1693 # commit subs and write new state
1691 if subs:
1694 if subs:
1692 for s in sorted(commitsubs):
1695 for s in sorted(commitsubs):
1693 sub = wctx.sub(s)
1696 sub = wctx.sub(s)
1694 self.ui.status(_('committing subrepository %s\n') %
1697 self.ui.status(_('committing subrepository %s\n') %
1695 subrepo.subrelpath(sub))
1698 subrepo.subrelpath(sub))
1696 sr = sub.commit(cctx._text, user, date)
1699 sr = sub.commit(cctx._text, user, date)
1697 newstate[s] = (newstate[s][0], sr)
1700 newstate[s] = (newstate[s][0], sr)
1698 subrepo.writestate(self, newstate)
1701 subrepo.writestate(self, newstate)
1699
1702
1700 p1, p2 = self.dirstate.parents()
1703 p1, p2 = self.dirstate.parents()
1701 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1704 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1702 try:
1705 try:
1703 self.hook("precommit", throw=True, parent1=hookp1,
1706 self.hook("precommit", throw=True, parent1=hookp1,
1704 parent2=hookp2)
1707 parent2=hookp2)
1705 tr = self.transaction('commit')
1708 tr = self.transaction('commit')
1706 ret = self.commitctx(cctx, True)
1709 ret = self.commitctx(cctx, True)
1707 except: # re-raises
1710 except: # re-raises
1708 if edited:
1711 if edited:
1709 self.ui.write(
1712 self.ui.write(
1710 _('note: commit message saved in %s\n') % msgfn)
1713 _('note: commit message saved in %s\n') % msgfn)
1711 raise
1714 raise
1712 # update bookmarks, dirstate and mergestate
1715 # update bookmarks, dirstate and mergestate
1713 bookmarks.update(self, [p1, p2], ret)
1716 bookmarks.update(self, [p1, p2], ret)
1714 cctx.markcommitted(ret)
1717 cctx.markcommitted(ret)
1715 ms.reset()
1718 ms.reset()
1716 tr.close()
1719 tr.close()
1717
1720
1718 finally:
1721 finally:
1719 lockmod.release(tr, lock, wlock)
1722 lockmod.release(tr, lock, wlock)
1720
1723
1721 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1724 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1722 # hack for commands that use a temporary commit (e.g. histedit):
1725 # hack for commands that use a temporary commit (e.g. histedit):
1723 # the temporary commit may already have been stripped before the hook runs
1726 # the temporary commit may already have been stripped before the hook runs
1724 if self.changelog.hasnode(ret):
1727 if self.changelog.hasnode(ret):
1725 self.hook("commit", node=node, parent1=parent1,
1728 self.hook("commit", node=node, parent1=parent1,
1726 parent2=parent2)
1729 parent2=parent2)
1727 self._afterlock(commithook)
1730 self._afterlock(commithook)
1728 return ret
1731 return ret
1729
1732
1730 @unfilteredmethod
1733 @unfilteredmethod
1731 def commitctx(self, ctx, error=False):
1734 def commitctx(self, ctx, error=False):
1732 """Add a new revision to current repository.
1735 """Add a new revision to current repository.
1733 Revision information is passed via the context argument.
1736 Revision information is passed via the context argument.
1734 """
1737 """
1735
1738
1736 tr = None
1739 tr = None
1737 p1, p2 = ctx.p1(), ctx.p2()
1740 p1, p2 = ctx.p1(), ctx.p2()
1738 user = ctx.user()
1741 user = ctx.user()
1739
1742
1740 lock = self.lock()
1743 lock = self.lock()
1741 try:
1744 try:
1742 tr = self.transaction("commit")
1745 tr = self.transaction("commit")
1743 trp = weakref.proxy(tr)
1746 trp = weakref.proxy(tr)
1744
1747
1745 if ctx.manifestnode():
1748 if ctx.manifestnode():
1746 # reuse an existing manifest revision
1749 # reuse an existing manifest revision
1747 mn = ctx.manifestnode()
1750 mn = ctx.manifestnode()
1748 files = ctx.files()
1751 files = ctx.files()
1749 elif ctx.files():
1752 elif ctx.files():
1750 m1ctx = p1.manifestctx()
1753 m1ctx = p1.manifestctx()
1751 m2ctx = p2.manifestctx()
1754 m2ctx = p2.manifestctx()
1752 mctx = m1ctx.copy()
1755 mctx = m1ctx.copy()
1753
1756
1754 m = mctx.read()
1757 m = mctx.read()
1755 m1 = m1ctx.read()
1758 m1 = m1ctx.read()
1756 m2 = m2ctx.read()
1759 m2 = m2ctx.read()
1757
1760
1758 # check in files
1761 # check in files
1759 added = []
1762 added = []
1760 changed = []
1763 changed = []
1761 removed = list(ctx.removed())
1764 removed = list(ctx.removed())
1762 linkrev = len(self)
1765 linkrev = len(self)
1763 self.ui.note(_("committing files:\n"))
1766 self.ui.note(_("committing files:\n"))
1764 for f in sorted(ctx.modified() + ctx.added()):
1767 for f in sorted(ctx.modified() + ctx.added()):
1765 self.ui.note(f + "\n")
1768 self.ui.note(f + "\n")
1766 try:
1769 try:
1767 fctx = ctx[f]
1770 fctx = ctx[f]
1768 if fctx is None:
1771 if fctx is None:
1769 removed.append(f)
1772 removed.append(f)
1770 else:
1773 else:
1771 added.append(f)
1774 added.append(f)
1772 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1775 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1773 trp, changed)
1776 trp, changed)
1774 m.setflag(f, fctx.flags())
1777 m.setflag(f, fctx.flags())
1775 except OSError as inst:
1778 except OSError as inst:
1776 self.ui.warn(_("trouble committing %s!\n") % f)
1779 self.ui.warn(_("trouble committing %s!\n") % f)
1777 raise
1780 raise
1778 except IOError as inst:
1781 except IOError as inst:
1779 errcode = getattr(inst, 'errno', errno.ENOENT)
1782 errcode = getattr(inst, 'errno', errno.ENOENT)
1780 if error or errcode and errcode != errno.ENOENT:
1783 if error or errcode and errcode != errno.ENOENT:
1781 self.ui.warn(_("trouble committing %s!\n") % f)
1784 self.ui.warn(_("trouble committing %s!\n") % f)
1782 raise
1785 raise
1783
1786
1784 # update manifest
1787 # update manifest
1785 self.ui.note(_("committing manifest\n"))
1788 self.ui.note(_("committing manifest\n"))
1786 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1789 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1787 drop = [f for f in removed if f in m]
1790 drop = [f for f in removed if f in m]
1788 for f in drop:
1791 for f in drop:
1789 del m[f]
1792 del m[f]
1790 mn = mctx.write(trp, linkrev,
1793 mn = mctx.write(trp, linkrev,
1791 p1.manifestnode(), p2.manifestnode(),
1794 p1.manifestnode(), p2.manifestnode(),
1792 added, drop)
1795 added, drop)
1793 files = changed + removed
1796 files = changed + removed
1794 else:
1797 else:
1795 mn = p1.manifestnode()
1798 mn = p1.manifestnode()
1796 files = []
1799 files = []
1797
1800
1798 # update changelog
1801 # update changelog
1799 self.ui.note(_("committing changelog\n"))
1802 self.ui.note(_("committing changelog\n"))
1800 self.changelog.delayupdate(tr)
1803 self.changelog.delayupdate(tr)
1801 n = self.changelog.add(mn, files, ctx.description(),
1804 n = self.changelog.add(mn, files, ctx.description(),
1802 trp, p1.node(), p2.node(),
1805 trp, p1.node(), p2.node(),
1803 user, ctx.date(), ctx.extra().copy())
1806 user, ctx.date(), ctx.extra().copy())
1804 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1807 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1805 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1808 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1806 parent2=xp2)
1809 parent2=xp2)
1807 # set the new commit in its proper phase
1810 # set the new commit in its proper phase
1808 targetphase = subrepo.newcommitphase(self.ui, ctx)
1811 targetphase = subrepo.newcommitphase(self.ui, ctx)
1809 if targetphase:
1812 if targetphase:
1810 # retracting the boundary does not alter the parent changesets.
1813 # retracting the boundary does not alter the parent changesets.
1811 # if a parent has a higher phase, the resulting phase will
1814 # if a parent has a higher phase, the resulting phase will
1812 # be compliant anyway
1815 # be compliant anyway
1813 #
1816 #
1814 # if minimal phase was 0 we don't need to retract anything
1817 # if minimal phase was 0 we don't need to retract anything
1815 phases.retractboundary(self, tr, targetphase, [n])
1818 phases.retractboundary(self, tr, targetphase, [n])
1816 tr.close()
1819 tr.close()
1817 return n
1820 return n
1818 finally:
1821 finally:
1819 if tr:
1822 if tr:
1820 tr.release()
1823 tr.release()
1821 lock.release()
1824 lock.release()
1822
1825
1823 @unfilteredmethod
1826 @unfilteredmethod
1824 def destroying(self):
1827 def destroying(self):
1825 '''Inform the repository that nodes are about to be destroyed.
1828 '''Inform the repository that nodes are about to be destroyed.
1826 Intended for use by strip and rollback, so there's a common
1829 Intended for use by strip and rollback, so there's a common
1827 place for anything that has to be done before destroying history.
1830 place for anything that has to be done before destroying history.
1828
1831
1829 This is mostly useful for saving state that is in memory and waiting
1832 This is mostly useful for saving state that is in memory and waiting
1830 to be flushed when the current lock is released. Because a call to
1833 to be flushed when the current lock is released. Because a call to
1831 destroyed is imminent, the repo will be invalidated causing those
1834 destroyed is imminent, the repo will be invalidated causing those
1832 changes to stay in memory (waiting for the next unlock), or vanish
1835 changes to stay in memory (waiting for the next unlock), or vanish
1833 completely.
1836 completely.
1834 '''
1837 '''
1835 # When using the same lock to commit and strip, the phasecache is left
1838 # When using the same lock to commit and strip, the phasecache is left
1836 # dirty after committing. Then when we strip, the repo is invalidated,
1839 # dirty after committing. Then when we strip, the repo is invalidated,
1837 # causing those changes to disappear.
1840 # causing those changes to disappear.
1838 if '_phasecache' in vars(self):
1841 if '_phasecache' in vars(self):
1839 self._phasecache.write()
1842 self._phasecache.write()
1840
1843
1841 @unfilteredmethod
1844 @unfilteredmethod
1842 def destroyed(self):
1845 def destroyed(self):
1843 '''Inform the repository that nodes have been destroyed.
1846 '''Inform the repository that nodes have been destroyed.
1844 Intended for use by strip and rollback, so there's a common
1847 Intended for use by strip and rollback, so there's a common
1845 place for anything that has to be done after destroying history.
1848 place for anything that has to be done after destroying history.
1846 '''
1849 '''
1847 # When one tries to:
1850 # When one tries to:
1848 # 1) destroy nodes thus calling this method (e.g. strip)
1851 # 1) destroy nodes thus calling this method (e.g. strip)
1849 # 2) use phasecache somewhere (e.g. commit)
1852 # 2) use phasecache somewhere (e.g. commit)
1850 #
1853 #
1851 # then 2) will fail because the phasecache contains nodes that were
1854 # then 2) will fail because the phasecache contains nodes that were
1852 # removed. We can either remove phasecache from the filecache,
1855 # removed. We can either remove phasecache from the filecache,
1853 # causing it to reload next time it is accessed, or simply filter
1856 # causing it to reload next time it is accessed, or simply filter
1854 # the removed nodes now and write the updated cache.
1857 # the removed nodes now and write the updated cache.
1855 self._phasecache.filterunknown(self)
1858 self._phasecache.filterunknown(self)
1856 self._phasecache.write()
1859 self._phasecache.write()
1857
1860
1858 # refresh all repository caches
1861 # refresh all repository caches
1859 self.updatecaches()
1862 self.updatecaches()
1860
1863
1861 # Ensure the persistent tag cache is updated. Doing it now
1864 # Ensure the persistent tag cache is updated. Doing it now
1862 # means that the tag cache only has to worry about destroyed
1865 # means that the tag cache only has to worry about destroyed
1863 # heads immediately after a strip/rollback. That in turn
1866 # heads immediately after a strip/rollback. That in turn
1864 # guarantees that "cachetip == currenttip" (comparing both rev
1867 # guarantees that "cachetip == currenttip" (comparing both rev
1865 # and node) always means no nodes have been added or destroyed.
1868 # and node) always means no nodes have been added or destroyed.
1866
1869
1867 # XXX this is suboptimal when qrefresh'ing: we strip the current
1870 # XXX this is suboptimal when qrefresh'ing: we strip the current
1868 # head, refresh the tag cache, then immediately add a new head.
1871 # head, refresh the tag cache, then immediately add a new head.
1869 # But I think doing it this way is necessary for the "instant
1872 # But I think doing it this way is necessary for the "instant
1870 # tag cache retrieval" case to work.
1873 # tag cache retrieval" case to work.
1871 self.invalidate()
1874 self.invalidate()
1872
1875
1873 def walk(self, match, node=None):
1876 def walk(self, match, node=None):
1874 '''
1877 '''
1875 walk recursively through the directory tree or a given
1878 walk recursively through the directory tree or a given
1876 changeset, finding all files matched by the match
1879 changeset, finding all files matched by the match
1877 function
1880 function
1878 '''
1881 '''
1879 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
1882 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
1880 return self[node].walk(match)
1883 return self[node].walk(match)
1881
1884
1882 def status(self, node1='.', node2=None, match=None,
1885 def status(self, node1='.', node2=None, match=None,
1883 ignored=False, clean=False, unknown=False,
1886 ignored=False, clean=False, unknown=False,
1884 listsubrepos=False):
1887 listsubrepos=False):
1885 '''a convenience method that calls node1.status(node2)'''
1888 '''a convenience method that calls node1.status(node2)'''
1886 return self[node1].status(node2, match, ignored, clean, unknown,
1889 return self[node1].status(node2, match, ignored, clean, unknown,
1887 listsubrepos)
1890 listsubrepos)
1888
1891
1889 def addpostdsstatus(self, ps):
1892 def addpostdsstatus(self, ps):
1890 """Add a callback to run within the wlock, at the point at which status
1893 """Add a callback to run within the wlock, at the point at which status
1891 fixups happen.
1894 fixups happen.
1892
1895
1893 On status completion, callback(wctx, status) will be called with the
1896 On status completion, callback(wctx, status) will be called with the
1894 wlock held, unless the dirstate has changed from underneath or the wlock
1897 wlock held, unless the dirstate has changed from underneath or the wlock
1895 couldn't be grabbed.
1898 couldn't be grabbed.
1896
1899
1897 Callbacks should not capture and use a cached copy of the dirstate --
1900 Callbacks should not capture and use a cached copy of the dirstate --
1898 it might change in the meanwhile. Instead, they should access the
1901 it might change in the meanwhile. Instead, they should access the
1899 dirstate via wctx.repo().dirstate.
1902 dirstate via wctx.repo().dirstate.
1900
1903
1901 This list is emptied out after each status run -- extensions should
1904 This list is emptied out after each status run -- extensions should
1902 make sure they add to this list each time dirstate.status is called.
1905 make sure they add to this list each time dirstate.status is called.
1903 Extensions should also make sure they don't call this for statuses
1906 Extensions should also make sure they don't call this for statuses
1904 that don't involve the dirstate.
1907 that don't involve the dirstate.
1905 """
1908 """
1906
1909
1907 # The list is located here for uniqueness reasons -- it is actually
1910 # The list is located here for uniqueness reasons -- it is actually
1908 # managed by the workingctx, but that isn't unique per-repo.
1911 # managed by the workingctx, but that isn't unique per-repo.
1909 self._postdsstatus.append(ps)
1912 self._postdsstatus.append(ps)
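A minimal sketch of how an extension might use this hook, assuming the hypothetical callback and setup names below; as described in the docstring, the callback receives the working context and the status object:

def _logmodified(wctx, status):
    # runs with the wlock held, after status fixups have been applied
    wctx.repo().ui.debug('post-status: %d modified file(s)\n'
                         % len(status.modified))

def _statushook_sketch(repo):
    # re-register on every status run, since the list is emptied each time
    repo.addpostdsstatus(_logmodified)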
1910
1913
1911 def postdsstatus(self):
1914 def postdsstatus(self):
1912 """Used by workingctx to get the list of post-dirstate-status hooks."""
1915 """Used by workingctx to get the list of post-dirstate-status hooks."""
1913 return self._postdsstatus
1916 return self._postdsstatus
1914
1917
1915 def clearpostdsstatus(self):
1918 def clearpostdsstatus(self):
1916 """Used by workingctx to clear post-dirstate-status hooks."""
1919 """Used by workingctx to clear post-dirstate-status hooks."""
1917 del self._postdsstatus[:]
1920 del self._postdsstatus[:]
1918
1921
1919 def heads(self, start=None):
1922 def heads(self, start=None):
1920 if start is None:
1923 if start is None:
1921 cl = self.changelog
1924 cl = self.changelog
1922 headrevs = reversed(cl.headrevs())
1925 headrevs = reversed(cl.headrevs())
1923 return [cl.node(rev) for rev in headrevs]
1926 return [cl.node(rev) for rev in headrevs]
1924
1927
1925 heads = self.changelog.heads(start)
1928 heads = self.changelog.heads(start)
1926 # sort the output in rev descending order
1929 # sort the output in rev descending order
1927 return sorted(heads, key=self.changelog.rev, reverse=True)
1930 return sorted(heads, key=self.changelog.rev, reverse=True)
1928
1931
1929 def branchheads(self, branch=None, start=None, closed=False):
1932 def branchheads(self, branch=None, start=None, closed=False):
1930 '''return a (possibly filtered) list of heads for the given branch
1933 '''return a (possibly filtered) list of heads for the given branch
1931
1934
1932 Heads are returned in topological order, from newest to oldest.
1935 Heads are returned in topological order, from newest to oldest.
1933 If branch is None, use the dirstate branch.
1936 If branch is None, use the dirstate branch.
1934 If start is not None, return only heads reachable from start.
1937 If start is not None, return only heads reachable from start.
1935 If closed is True, return heads that are marked as closed as well.
1938 If closed is True, return heads that are marked as closed as well.
1936 '''
1939 '''
1937 if branch is None:
1940 if branch is None:
1938 branch = self[None].branch()
1941 branch = self[None].branch()
1939 branches = self.branchmap()
1942 branches = self.branchmap()
1940 if branch not in branches:
1943 if branch not in branches:
1941 return []
1944 return []
1942 # the cache returns heads ordered lowest to highest
1945 # the cache returns heads ordered lowest to highest
1943 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1946 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1944 if start is not None:
1947 if start is not None:
1945 # filter out the heads that cannot be reached from startrev
1948 # filter out the heads that cannot be reached from startrev
1946 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1949 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1947 bheads = [h for h in bheads if h in fbheads]
1950 bheads = [h for h in bheads if h in fbheads]
1948 return bheads
1951 return bheads
1949
1952
1950 def branches(self, nodes):
1953 def branches(self, nodes):
1951 if not nodes:
1954 if not nodes:
1952 nodes = [self.changelog.tip()]
1955 nodes = [self.changelog.tip()]
1953 b = []
1956 b = []
1954 for n in nodes:
1957 for n in nodes:
1955 t = n
1958 t = n
1956 while True:
1959 while True:
1957 p = self.changelog.parents(n)
1960 p = self.changelog.parents(n)
1958 if p[1] != nullid or p[0] == nullid:
1961 if p[1] != nullid or p[0] == nullid:
1959 b.append((t, n, p[0], p[1]))
1962 b.append((t, n, p[0], p[1]))
1960 break
1963 break
1961 n = p[0]
1964 n = p[0]
1962 return b
1965 return b
1963
1966
1964 def between(self, pairs):
1967 def between(self, pairs):
1965 r = []
1968 r = []
1966
1969
1967 for top, bottom in pairs:
1970 for top, bottom in pairs:
1968 n, l, i = top, [], 0
1971 n, l, i = top, [], 0
1969 f = 1
1972 f = 1
1970
1973
1971 while n != bottom and n != nullid:
1974 while n != bottom and n != nullid:
1972 p = self.changelog.parents(n)[0]
1975 p = self.changelog.parents(n)[0]
1973 if i == f:
1976 if i == f:
1974 l.append(n)
1977 l.append(n)
1975 f = f * 2
1978 f = f * 2
1976 n = p
1979 n = p
1977 i += 1
1980 i += 1
1978
1981
1979 r.append(l)
1982 r.append(l)
1980
1983
1981 return r
1984 return r
1982
1985
1983 def checkpush(self, pushop):
1986 def checkpush(self, pushop):
1984 """Extensions can override this function if additional checks have
1987 """Extensions can override this function if additional checks have
1985 to be performed before pushing, or call it if they override push
1988 to be performed before pushing, or call it if they override push
1986 command.
1989 command.
1987 """
1990 """
1988 pass
1991 pass
1989
1992
1990 @unfilteredpropertycache
1993 @unfilteredpropertycache
1991 def prepushoutgoinghooks(self):
1994 def prepushoutgoinghooks(self):
1992 """Return util.hooks consists of a pushop with repo, remote, outgoing
1995 """Return util.hooks consists of a pushop with repo, remote, outgoing
1993 methods, which are called before pushing changesets.
1996 methods, which are called before pushing changesets.
1994 """
1997 """
1995 return util.hooks()
1998 return util.hooks()
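As a hedged sketch only (the hook name, limit and setup function below are illustrative, not part of this change), an extension could register a pre-push check roughly like this:

from mercurial import error
from mercurial.i18n import _

def _checkoutgoing(pushop):
    # pushop carries repo, remote and outgoing, per the docstring above
    if len(pushop.outgoing.missing) > 1000:
        raise error.Abort(_('refusing to push more than 1000 changesets'))

def reposetup(ui, repo):
    repo.prepushoutgoinghooks.add('bigpushguard', _checkoutgoing)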
1996
1999
1997 def pushkey(self, namespace, key, old, new):
2000 def pushkey(self, namespace, key, old, new):
1998 try:
2001 try:
1999 tr = self.currenttransaction()
2002 tr = self.currenttransaction()
2000 hookargs = {}
2003 hookargs = {}
2001 if tr is not None:
2004 if tr is not None:
2002 hookargs.update(tr.hookargs)
2005 hookargs.update(tr.hookargs)
2003 hookargs['namespace'] = namespace
2006 hookargs['namespace'] = namespace
2004 hookargs['key'] = key
2007 hookargs['key'] = key
2005 hookargs['old'] = old
2008 hookargs['old'] = old
2006 hookargs['new'] = new
2009 hookargs['new'] = new
2007 self.hook('prepushkey', throw=True, **hookargs)
2010 self.hook('prepushkey', throw=True, **hookargs)
2008 except error.HookAbort as exc:
2011 except error.HookAbort as exc:
2009 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2012 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2010 if exc.hint:
2013 if exc.hint:
2011 self.ui.write_err(_("(%s)\n") % exc.hint)
2014 self.ui.write_err(_("(%s)\n") % exc.hint)
2012 return False
2015 return False
2013 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2016 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2014 ret = pushkey.push(self, namespace, key, old, new)
2017 ret = pushkey.push(self, namespace, key, old, new)
2015 def runhook():
2018 def runhook():
2016 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2019 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2017 ret=ret)
2020 ret=ret)
2018 self._afterlock(runhook)
2021 self._afterlock(runhook)
2019 return ret
2022 return ret
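For illustration only (the helper below is hypothetical), a caller could advertise a bookmark through this pushkey mechanism; 'bookmarks' is one of the standard namespaces, and an empty old value asks for creation:

def _setbookmark_sketch(repo, name, node):
    # returns False if a prepushkey hook vetoed the update (see above)
    return repo.pushkey('bookmarks', name, '', hex(node))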
2020
2023
2021 def listkeys(self, namespace):
2024 def listkeys(self, namespace):
2022 self.hook('prelistkeys', throw=True, namespace=namespace)
2025 self.hook('prelistkeys', throw=True, namespace=namespace)
2023 self.ui.debug('listing keys for "%s"\n' % namespace)
2026 self.ui.debug('listing keys for "%s"\n' % namespace)
2024 values = pushkey.list(self, namespace)
2027 values = pushkey.list(self, namespace)
2025 self.hook('listkeys', namespace=namespace, values=values)
2028 self.hook('listkeys', namespace=namespace, values=values)
2026 return values
2029 return values
2027
2030
2028 def debugwireargs(self, one, two, three=None, four=None, five=None):
2031 def debugwireargs(self, one, two, three=None, four=None, five=None):
2029 '''used to test argument passing over the wire'''
2032 '''used to test argument passing over the wire'''
2030 return "%s %s %s %s %s" % (one, two, three, four, five)
2033 return "%s %s %s %s %s" % (one, two, three, four, five)
2031
2034
2032 def savecommitmessage(self, text):
2035 def savecommitmessage(self, text):
2033 fp = self.vfs('last-message.txt', 'wb')
2036 fp = self.vfs('last-message.txt', 'wb')
2034 try:
2037 try:
2035 fp.write(text)
2038 fp.write(text)
2036 finally:
2039 finally:
2037 fp.close()
2040 fp.close()
2038 return self.pathto(fp.name[len(self.root) + 1:])
2041 return self.pathto(fp.name[len(self.root) + 1:])
2039
2042
2040 # used to avoid circular references so destructors work
2043 # used to avoid circular references so destructors work
2041 def aftertrans(files):
2044 def aftertrans(files):
2042 renamefiles = [tuple(t) for t in files]
2045 renamefiles = [tuple(t) for t in files]
2043 def a():
2046 def a():
2044 for vfs, src, dest in renamefiles:
2047 for vfs, src, dest in renamefiles:
2045 # if src and dest refer to the same file, vfs.rename is a no-op,
2048 # if src and dest refer to the same file, vfs.rename is a no-op,
2046 # leaving both src and dest on disk. delete dest to make sure
2049 # leaving both src and dest on disk. delete dest to make sure
2047 # the rename cannot be such a no-op.
2050 # the rename cannot be such a no-op.
2048 vfs.tryunlink(dest)
2051 vfs.tryunlink(dest)
2049 try:
2052 try:
2050 vfs.rename(src, dest)
2053 vfs.rename(src, dest)
2051 except OSError: # journal file does not yet exist
2054 except OSError: # journal file does not yet exist
2052 pass
2055 pass
2053 return a
2056 return a
2054
2057
2055 def undoname(fn):
2058 def undoname(fn):
2056 base, name = os.path.split(fn)
2059 base, name = os.path.split(fn)
2057 assert name.startswith('journal')
2060 assert name.startswith('journal')
2058 return os.path.join(base, name.replace('journal', 'undo', 1))
2061 return os.path.join(base, name.replace('journal', 'undo', 1))
2059
2062
2060 def instance(ui, path, create):
2063 def instance(ui, path, create):
2061 return localrepository(ui, util.urllocalpath(path), create)
2064 return localrepository(ui, util.urllocalpath(path), create)
2062
2065
2063 def islocal(path):
2066 def islocal(path):
2064 return True
2067 return True
2065
2068
2066 def newreporequirements(repo):
2069 def newreporequirements(repo):
2067 """Determine the set of requirements for a new local repository.
2070 """Determine the set of requirements for a new local repository.
2068
2071
2069 Extensions can wrap this function to specify custom requirements for
2072 Extensions can wrap this function to specify custom requirements for
2070 new repositories.
2073 new repositories.
2071 """
2074 """
2072 ui = repo.ui
2075 ui = repo.ui
2073 requirements = {'revlogv1'}
2076 requirements = {'revlogv1'}
2074 if ui.configbool('format', 'usestore', True):
2077 if ui.configbool('format', 'usestore', True):
2075 requirements.add('store')
2078 requirements.add('store')
2076 if ui.configbool('format', 'usefncache', True):
2079 if ui.configbool('format', 'usefncache', True):
2077 requirements.add('fncache')
2080 requirements.add('fncache')
2078 if ui.configbool('format', 'dotencode', True):
2081 if ui.configbool('format', 'dotencode', True):
2079 requirements.add('dotencode')
2082 requirements.add('dotencode')
2080
2083
2081 compengine = ui.config('experimental', 'format.compression', 'zlib')
2084 compengine = ui.config('experimental', 'format.compression', 'zlib')
2082 if compengine not in util.compengines:
2085 if compengine not in util.compengines:
2083 raise error.Abort(_('compression engine %s defined by '
2086 raise error.Abort(_('compression engine %s defined by '
2084 'experimental.format.compression not available') %
2087 'experimental.format.compression not available') %
2085 compengine,
2088 compengine,
2086 hint=_('run "hg debuginstall" to list available '
2089 hint=_('run "hg debuginstall" to list available '
2087 'compression engines'))
2090 'compression engines'))
2088
2091
2089 # zlib is the historical default and doesn't need an explicit requirement.
2092 # zlib is the historical default and doesn't need an explicit requirement.
2090 if compengine != 'zlib':
2093 if compengine != 'zlib':
2091 requirements.add('exp-compression-%s' % compengine)
2094 requirements.add('exp-compression-%s' % compengine)
2092
2095
2093 if scmutil.gdinitconfig(ui):
2096 if scmutil.gdinitconfig(ui):
2094 requirements.add('generaldelta')
2097 requirements.add('generaldelta')
2095 if ui.configbool('experimental', 'treemanifest', False):
2098 if ui.configbool('experimental', 'treemanifest', False):
2096 requirements.add('treemanifest')
2099 requirements.add('treemanifest')
2097 if ui.configbool('experimental', 'manifestv2', False):
2100 if ui.configbool('experimental', 'manifestv2', False):
2098 requirements.add('manifestv2')
2101 requirements.add('manifestv2')
2099
2102
2100 revlogv2 = ui.config('experimental', 'revlogv2')
2103 revlogv2 = ui.config('experimental', 'revlogv2')
2101 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2104 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2102 requirements.remove('revlogv1')
2105 requirements.remove('revlogv1')
2103 # generaldelta is implied by revlogv2.
2106 # generaldelta is implied by revlogv2.
2104 requirements.discard('generaldelta')
2107 requirements.discard('generaldelta')
2105 requirements.add(REVLOGV2_REQUIREMENT)
2108 requirements.add(REVLOGV2_REQUIREMENT)
2106
2109
2107 return requirements
2110 return requirements
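Since this function is the documented extension point for new-repository requirements, here is a hedged sketch of a wrapper; the extension name, config knob and requirement string are made up for illustration:

from mercurial import extensions, localrepo

def _newreporequirements(orig, repo):
    requirements = orig(repo)
    if repo.ui.configbool('myext', 'enable', False):
        # mark repos created with the extension enabled
        requirements.add('exp-myext-feature')
    return requirements

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)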
@@ -1,2199 +1,2208 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import binascii
16 import binascii
17 import collections
17 import collections
18 import errno
18 import errno
19 import hashlib
19 import hashlib
20 import os
20 import os
21 import struct
21 import struct
22 import zlib
22 import zlib
23
23
24 # import stuff from node for others to import from revlog
24 # import stuff from node for others to import from revlog
25 from .node import (
25 from .node import (
26 bin,
26 bin,
27 hex,
27 hex,
28 nullid,
28 nullid,
29 nullrev,
29 nullrev,
30 wdirhex,
30 wdirhex,
31 wdirid,
31 wdirid,
32 wdirrev,
32 wdirrev,
33 )
33 )
34 from .i18n import _
34 from .i18n import _
35 from . import (
35 from . import (
36 ancestor,
36 ancestor,
37 error,
37 error,
38 mdiff,
38 mdiff,
39 policy,
39 policy,
40 pycompat,
40 pycompat,
41 templatefilters,
41 templatefilters,
42 util,
42 util,
43 )
43 )
44
44
45 parsers = policy.importmod(r'parsers')
45 parsers = policy.importmod(r'parsers')
46
46
47 _pack = struct.pack
47 _pack = struct.pack
48 _unpack = struct.unpack
48 _unpack = struct.unpack
49 # Aliased for performance.
49 # Aliased for performance.
50 _zlibdecompress = zlib.decompress
50 _zlibdecompress = zlib.decompress
51
51
52 # revlog header flags
52 # revlog header flags
53 REVLOGV0 = 0
53 REVLOGV0 = 0
54 REVLOGV1 = 1
54 REVLOGV1 = 1
55 # Dummy value until file format is finalized.
55 # Dummy value until file format is finalized.
56 # Reminder: change the bounds check in revlog.__init__ when this is changed.
56 # Reminder: change the bounds check in revlog.__init__ when this is changed.
57 REVLOGV2 = 0xDEAD
57 REVLOGV2 = 0xDEAD
58 FLAG_INLINE_DATA = (1 << 16)
58 FLAG_INLINE_DATA = (1 << 16)
59 FLAG_GENERALDELTA = (1 << 17)
59 FLAG_GENERALDELTA = (1 << 17)
60 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
60 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
61 REVLOG_DEFAULT_FORMAT = REVLOGV1
61 REVLOG_DEFAULT_FORMAT = REVLOGV1
62 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
62 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
63 REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
63 REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
64 REVLOGV2_FLAGS = REVLOGV1_FLAGS
64 REVLOGV2_FLAGS = REVLOGV1_FLAGS
65
65
66 # revlog index flags
66 # revlog index flags
67 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
67 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
68 REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
68 REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
69 REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
69 REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
70 REVIDX_DEFAULT_FLAGS = 0
70 REVIDX_DEFAULT_FLAGS = 0
71 # stable order in which flags need to be processed and their processors applied
71 # stable order in which flags need to be processed and their processors applied
72 REVIDX_FLAGS_ORDER = [
72 REVIDX_FLAGS_ORDER = [
73 REVIDX_ISCENSORED,
73 REVIDX_ISCENSORED,
74 REVIDX_ELLIPSIS,
74 REVIDX_ELLIPSIS,
75 REVIDX_EXTSTORED,
75 REVIDX_EXTSTORED,
76 ]
76 ]
77 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
77 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
78
78
79 # max size of revlog with inline data
79 # max size of revlog with inline data
80 _maxinline = 131072
80 _maxinline = 131072
81 _chunksize = 1048576
81 _chunksize = 1048576
82
82
83 RevlogError = error.RevlogError
83 RevlogError = error.RevlogError
84 LookupError = error.LookupError
84 LookupError = error.LookupError
85 CensoredNodeError = error.CensoredNodeError
85 CensoredNodeError = error.CensoredNodeError
86 ProgrammingError = error.ProgrammingError
86 ProgrammingError = error.ProgrammingError
87
87
88 # Store flag processors (cf. 'addflagprocessor()' to register)
88 # Store flag processors (cf. 'addflagprocessor()' to register)
89 _flagprocessors = {
89 _flagprocessors = {
90 REVIDX_ISCENSORED: None,
90 REVIDX_ISCENSORED: None,
91 }
91 }
92
92
93 def addflagprocessor(flag, processor):
93 def addflagprocessor(flag, processor):
94 """Register a flag processor on a revision data flag.
94 """Register a flag processor on a revision data flag.
95
95
96 Invariant:
96 Invariant:
97 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER.
97 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER.
98 - Only one flag processor can be registered on a specific flag.
98 - Only one flag processor can be registered on a specific flag.
99 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
99 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
100 following signatures:
100 following signatures:
101 - (read) f(self, rawtext) -> text, bool
101 - (read) f(self, rawtext) -> text, bool
102 - (write) f(self, text) -> rawtext, bool
102 - (write) f(self, text) -> rawtext, bool
103 - (raw) f(self, rawtext) -> bool
103 - (raw) f(self, rawtext) -> bool
104 "text" is presented to the user. "rawtext" is stored in revlog data, not
104 "text" is presented to the user. "rawtext" is stored in revlog data, not
105 directly visible to the user.
105 directly visible to the user.
106 The boolean returned by these transforms is used to determine whether
106 The boolean returned by these transforms is used to determine whether
107 the returned text can be used for hash integrity checking. For example,
107 the returned text can be used for hash integrity checking. For example,
108 if "write" returns False, then "text" is used to generate hash. If
108 if "write" returns False, then "text" is used to generate hash. If
109 "write" returns True, that basically means "rawtext" returned by "write"
109 "write" returns True, that basically means "rawtext" returned by "write"
110 should be used to generate hash. Usually, "write" and "read" return
110 should be used to generate hash. Usually, "write" and "read" return
111 different booleans. And "raw" returns a same boolean as "write".
111 different booleans. And "raw" returns a same boolean as "write".
112
112
113 Note: The 'raw' transform is used for changegroup generation and in some
113 Note: The 'raw' transform is used for changegroup generation and in some
114 debug commands. In this case the transform only indicates whether the
114 debug commands. In this case the transform only indicates whether the
115 contents can be used for hash integrity checks.
115 contents can be used for hash integrity checks.
116 """
116 """
117 if not flag & REVIDX_KNOWN_FLAGS:
117 if not flag & REVIDX_KNOWN_FLAGS:
118 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
118 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
119 raise ProgrammingError(msg)
119 raise ProgrammingError(msg)
120 if flag not in REVIDX_FLAGS_ORDER:
120 if flag not in REVIDX_FLAGS_ORDER:
121 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
121 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
122 raise ProgrammingError(msg)
122 raise ProgrammingError(msg)
123 if flag in _flagprocessors:
123 if flag in _flagprocessors:
124 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
124 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
125 raise error.Abort(msg)
125 raise error.Abort(msg)
126 _flagprocessors[flag] = processor
126 _flagprocessors[flag] = processor
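A hedged sketch of what a registration could look like for the REVIDX_EXTSTORED flag defined above; the three no-op transforms follow the (read, write, raw) contract from the docstring and are illustrative only:

def _readext(rl, rawtext):
    return rawtext, True   # (text, hashable): nothing is transformed here

def _writeext(rl, text):
    return text, True      # (rawtext, hashable)

def _rawext(rl, rawtext):
    return True            # rawtext is usable for hash integrity checks

addflagprocessor(REVIDX_EXTSTORED, (_readext, _writeext, _rawext))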
127
127
128 def getoffset(q):
128 def getoffset(q):
129 return int(q >> 16)
129 return int(q >> 16)
130
130
131 def gettype(q):
131 def gettype(q):
132 return int(q & 0xFFFF)
132 return int(q & 0xFFFF)
133
133
134 def offset_type(offset, type):
134 def offset_type(offset, type):
135 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
135 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
136 raise ValueError('unknown revlog index flags')
136 raise ValueError('unknown revlog index flags')
137 return int(int(offset) << 16 | type)
137 return int(int(offset) << 16 | type)
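A quick worked example of this packing: the byte offset occupies the high bits of the first index field and the flags the low 16 bits, so the two accessors above invert offset_type exactly:

q = offset_type(4096, REVIDX_ISCENSORED)
assert getoffset(q) == 4096
assert gettype(q) == REVIDX_ISCENSORED   # 1 << 15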
138
138
139 _nullhash = hashlib.sha1(nullid)
139 _nullhash = hashlib.sha1(nullid)
140
140
141 def hash(text, p1, p2):
141 def hash(text, p1, p2):
142 """generate a hash from the given text and its parent hashes
142 """generate a hash from the given text and its parent hashes
143
143
144 This hash combines both the current file contents and its history
144 This hash combines both the current file contents and its history
145 in a manner that makes it easy to distinguish nodes with the same
145 in a manner that makes it easy to distinguish nodes with the same
146 content in the revision graph.
146 content in the revision graph.
147 """
147 """
148 # As of now, if one of the parent nodes is null, p2 is null
148 # As of now, if one of the parent nodes is null, p2 is null
149 if p2 == nullid:
149 if p2 == nullid:
150 # deep copy of a hash is faster than creating one
150 # deep copy of a hash is faster than creating one
151 s = _nullhash.copy()
151 s = _nullhash.copy()
152 s.update(p1)
152 s.update(p1)
153 else:
153 else:
154 # none of the parent nodes are nullid
154 # none of the parent nodes are nullid
155 l = [p1, p2]
155 l = [p1, p2]
156 l.sort()
156 l.sort()
157 s = hashlib.sha1(l[0])
157 s = hashlib.sha1(l[0])
158 s.update(l[1])
158 s.update(l[1])
159 s.update(text)
159 s.update(text)
160 return s.digest()
160 return s.digest()
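As a small illustration of the behaviour described in the docstring (using the module-level nullid imported above): identical content with identical parents hashes to the same node, while changing a parent changes the node:

n1 = hash(b"some file content\n", nullid, nullid)
n2 = hash(b"some file content\n", nullid, nullid)
assert n1 == n2                                      # content + parents fix the node
assert hash(b"some file content\n", n1, nullid) != n1   # history changes the hash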
161
161
162 # index v0:
162 # index v0:
163 # 4 bytes: offset
163 # 4 bytes: offset
164 # 4 bytes: compressed length
164 # 4 bytes: compressed length
165 # 4 bytes: base rev
165 # 4 bytes: base rev
166 # 4 bytes: link rev
166 # 4 bytes: link rev
167 # 20 bytes: parent 1 nodeid
167 # 20 bytes: parent 1 nodeid
168 # 20 bytes: parent 2 nodeid
168 # 20 bytes: parent 2 nodeid
169 # 20 bytes: nodeid
169 # 20 bytes: nodeid
170 indexformatv0 = ">4l20s20s20s"
170 indexformatv0 = ">4l20s20s20s"
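To make the layout comment above concrete: a v0 entry is 76 bytes, and unpacking an all-zero dummy entry with the module's struct helpers yields the fields in the documented order (purely illustrative):

assert struct.calcsize(indexformatv0) == 76
offset, clen, base, link, p1, p2, nodeid = _unpack(indexformatv0, b'\0' * 76)
assert (offset, clen, base, link) == (0, 0, 0, 0)
assert p1 == p2 == nodeid == b'\0' * 20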
171
171
172 class revlogoldio(object):
172 class revlogoldio(object):
173 def __init__(self):
173 def __init__(self):
174 self.size = struct.calcsize(indexformatv0)
174 self.size = struct.calcsize(indexformatv0)
175
175
176 def parseindex(self, data, inline):
176 def parseindex(self, data, inline):
177 s = self.size
177 s = self.size
178 index = []
178 index = []
179 nodemap = {nullid: nullrev}
179 nodemap = {nullid: nullrev}
180 n = off = 0
180 n = off = 0
181 l = len(data)
181 l = len(data)
182 while off + s <= l:
182 while off + s <= l:
183 cur = data[off:off + s]
183 cur = data[off:off + s]
184 off += s
184 off += s
185 e = _unpack(indexformatv0, cur)
185 e = _unpack(indexformatv0, cur)
186 # transform to revlogv1 format
186 # transform to revlogv1 format
187 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
187 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
188 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
188 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
189 index.append(e2)
189 index.append(e2)
190 nodemap[e[6]] = n
190 nodemap[e[6]] = n
191 n += 1
191 n += 1
192
192
193 # add the magic null revision at -1
193 # add the magic null revision at -1
194 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
194 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
195
195
196 return index, nodemap, None
196 return index, nodemap, None
197
197
198 def packentry(self, entry, node, version, rev):
198 def packentry(self, entry, node, version, rev):
199 if gettype(entry[0]):
199 if gettype(entry[0]):
200 raise RevlogError(_('index entry flags need revlog version 1'))
200 raise RevlogError(_('index entry flags need revlog version 1'))
201 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
201 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
202 node(entry[5]), node(entry[6]), entry[7])
202 node(entry[5]), node(entry[6]), entry[7])
203 return _pack(indexformatv0, *e2)
203 return _pack(indexformatv0, *e2)
204
204
205 # index ng:
205 # index ng:
206 # 6 bytes: offset
206 # 6 bytes: offset
207 # 2 bytes: flags
207 # 2 bytes: flags
208 # 4 bytes: compressed length
208 # 4 bytes: compressed length
209 # 4 bytes: uncompressed length
209 # 4 bytes: uncompressed length
210 # 4 bytes: base rev
210 # 4 bytes: base rev
211 # 4 bytes: link rev
211 # 4 bytes: link rev
212 # 4 bytes: parent 1 rev
212 # 4 bytes: parent 1 rev
213 # 4 bytes: parent 2 rev
213 # 4 bytes: parent 2 rev
214 # 32 bytes: nodeid
214 # 32 bytes: nodeid
215 indexformatng = ">Qiiiiii20s12x"
215 indexformatng = ">Qiiiiii20s12x"
216 versionformat = ">I"
216 versionformat = ">I"
217
217
218 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
218 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
219 # signed integer)
219 # signed integer)
220 _maxentrysize = 0x7fffffff
220 _maxentrysize = 0x7fffffff
221
221
222 class revlogio(object):
222 class revlogio(object):
223 def __init__(self):
223 def __init__(self):
224 self.size = struct.calcsize(indexformatng)
224 self.size = struct.calcsize(indexformatng)
225
225
226 def parseindex(self, data, inline):
226 def parseindex(self, data, inline):
227 # call the C implementation to parse the index data
227 # call the C implementation to parse the index data
228 index, cache = parsers.parse_index2(data, inline)
228 index, cache = parsers.parse_index2(data, inline)
229 return index, getattr(index, 'nodemap', None), cache
229 return index, getattr(index, 'nodemap', None), cache
230
230
231 def packentry(self, entry, node, version, rev):
231 def packentry(self, entry, node, version, rev):
232 p = _pack(indexformatng, *entry)
232 p = _pack(indexformatng, *entry)
233 if rev == 0:
233 if rev == 0:
234 p = _pack(versionformat, version) + p[4:]
234 p = _pack(versionformat, version) + p[4:]
235 return p
235 return p
236
236
237 class revlog(object):
237 class revlog(object):
238 """
238 """
239 the underlying revision storage object
239 the underlying revision storage object
240
240
241 A revlog consists of two parts, an index and the revision data.
241 A revlog consists of two parts, an index and the revision data.
242
242
243 The index is a file with a fixed record size containing
243 The index is a file with a fixed record size containing
244 information on each revision, including its nodeid (hash), the
244 information on each revision, including its nodeid (hash), the
245 nodeids of its parents, the position and offset of its data within
245 nodeids of its parents, the position and offset of its data within
246 the data file, and the revision it's based on. Finally, each entry
246 the data file, and the revision it's based on. Finally, each entry
247 contains a linkrev entry that can serve as a pointer to external
247 contains a linkrev entry that can serve as a pointer to external
248 data.
248 data.
249
249
250 The revision data itself is a linear collection of data chunks.
250 The revision data itself is a linear collection of data chunks.
251 Each chunk represents a revision and is usually represented as a
251 Each chunk represents a revision and is usually represented as a
252 delta against the previous chunk. To bound lookup time, runs of
252 delta against the previous chunk. To bound lookup time, runs of
253 deltas are limited to about 2 times the length of the original
253 deltas are limited to about 2 times the length of the original
254 version data. This makes retrieval of a version proportional to
254 version data. This makes retrieval of a version proportional to
255 its size, or O(1) relative to the number of revisions.
255 its size, or O(1) relative to the number of revisions.
256
256
257 Both pieces of the revlog are written to in an append-only
257 Both pieces of the revlog are written to in an append-only
258 fashion, which means we never need to rewrite a file to insert or
258 fashion, which means we never need to rewrite a file to insert or
259 remove data, and can use some simple techniques to avoid the need
259 remove data, and can use some simple techniques to avoid the need
260 for locking while reading.
260 for locking while reading.
261
261
262 If checkambig, indexfile is opened with checkambig=True at
262 If checkambig, indexfile is opened with checkambig=True at
263 writing, to avoid file stat ambiguity.
263 writing, to avoid file stat ambiguity.
264 """
264 """
265 def __init__(self, opener, indexfile, datafile=None, checkambig=False):
265 def __init__(self, opener, indexfile, datafile=None, checkambig=False):
266 """
266 """
267 create a revlog object
267 create a revlog object
268
268
269 opener is a function that abstracts the file opening operation
269 opener is a function that abstracts the file opening operation
270 and can be used to implement COW semantics or the like.
270 and can be used to implement COW semantics or the like.
271 """
271 """
272 self.indexfile = indexfile
272 self.indexfile = indexfile
273 self.datafile = datafile or (indexfile[:-2] + ".d")
273 self.datafile = datafile or (indexfile[:-2] + ".d")
274 self.opener = opener
274 self.opener = opener
275 # When True, indexfile is opened with checkambig=True at writing, to
275 # When True, indexfile is opened with checkambig=True at writing, to
276 # avoid file stat ambiguity.
276 # avoid file stat ambiguity.
277 self._checkambig = checkambig
277 self._checkambig = checkambig
278 # 3-tuple of (node, rev, text) for a raw revision.
278 # 3-tuple of (node, rev, text) for a raw revision.
279 self._cache = None
279 self._cache = None
280 # Maps rev to chain base rev.
280 # Maps rev to chain base rev.
281 self._chainbasecache = util.lrucachedict(100)
281 self._chainbasecache = util.lrucachedict(100)
282 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
282 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
283 self._chunkcache = (0, '')
283 self._chunkcache = (0, '')
284 # How much data to read and cache into the raw revlog data cache.
284 # How much data to read and cache into the raw revlog data cache.
285 self._chunkcachesize = 65536
285 self._chunkcachesize = 65536
286 self._maxchainlen = None
286 self._maxchainlen = None
287 self._aggressivemergedeltas = False
287 self._aggressivemergedeltas = False
288 self.index = []
288 self.index = []
289 # Mapping of partial identifiers to full nodes.
289 # Mapping of partial identifiers to full nodes.
290 self._pcache = {}
290 self._pcache = {}
291 # Mapping of revision integer to full node.
291 # Mapping of revision integer to full node.
292 self._nodecache = {nullid: nullrev}
292 self._nodecache = {nullid: nullrev}
293 self._nodepos = None
293 self._nodepos = None
294 self._compengine = 'zlib'
294 self._compengine = 'zlib'
295 self._maxdeltachainspan = -1
295
296
296 v = REVLOG_DEFAULT_VERSION
297 v = REVLOG_DEFAULT_VERSION
297 opts = getattr(opener, 'options', None)
298 opts = getattr(opener, 'options', None)
298 if opts is not None:
299 if opts is not None:
299 if 'revlogv2' in opts:
300 if 'revlogv2' in opts:
300 # version 2 revlogs always use generaldelta.
301 # version 2 revlogs always use generaldelta.
301 v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
302 v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
302 elif 'revlogv1' in opts:
303 elif 'revlogv1' in opts:
303 if 'generaldelta' in opts:
304 if 'generaldelta' in opts:
304 v |= FLAG_GENERALDELTA
305 v |= FLAG_GENERALDELTA
305 else:
306 else:
306 v = 0
307 v = 0
307 if 'chunkcachesize' in opts:
308 if 'chunkcachesize' in opts:
308 self._chunkcachesize = opts['chunkcachesize']
309 self._chunkcachesize = opts['chunkcachesize']
309 if 'maxchainlen' in opts:
310 if 'maxchainlen' in opts:
310 self._maxchainlen = opts['maxchainlen']
311 self._maxchainlen = opts['maxchainlen']
311 if 'aggressivemergedeltas' in opts:
312 if 'aggressivemergedeltas' in opts:
312 self._aggressivemergedeltas = opts['aggressivemergedeltas']
313 self._aggressivemergedeltas = opts['aggressivemergedeltas']
313 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
314 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
314 if 'compengine' in opts:
315 if 'compengine' in opts:
315 self._compengine = opts['compengine']
316 self._compengine = opts['compengine']
317 if 'maxdeltachainspan' in opts:
318 self._maxdeltachainspan = opts['maxdeltachainspan']
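# note on the new 'maxdeltachainspan' knob read just above: it arrives via
# the opener options; -1 (the default set earlier in __init__) is assumed to
# mean "no limit", while a positive value is assumed to bound how far apart
# on disk a delta chain may span before a full snapshot is preferred
# (cf. issue5480, which this change aims to mitigate).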
316
319
317 if self._chunkcachesize <= 0:
320 if self._chunkcachesize <= 0:
318 raise RevlogError(_('revlog chunk cache size %r is not greater '
321 raise RevlogError(_('revlog chunk cache size %r is not greater '
319 'than 0') % self._chunkcachesize)
322 'than 0') % self._chunkcachesize)
320 elif self._chunkcachesize & (self._chunkcachesize - 1):
323 elif self._chunkcachesize & (self._chunkcachesize - 1):
321 raise RevlogError(_('revlog chunk cache size %r is not a power '
324 raise RevlogError(_('revlog chunk cache size %r is not a power '
322 'of 2') % self._chunkcachesize)
325 'of 2') % self._chunkcachesize)
323
326
324 indexdata = ''
327 indexdata = ''
325 self._initempty = True
328 self._initempty = True
326 try:
329 try:
327 f = self.opener(self.indexfile)
330 f = self.opener(self.indexfile)
328 indexdata = f.read()
331 indexdata = f.read()
329 f.close()
332 f.close()
330 if len(indexdata) > 0:
333 if len(indexdata) > 0:
331 v = struct.unpack(versionformat, indexdata[:4])[0]
334 v = struct.unpack(versionformat, indexdata[:4])[0]
332 self._initempty = False
335 self._initempty = False
333 except IOError as inst:
336 except IOError as inst:
334 if inst.errno != errno.ENOENT:
337 if inst.errno != errno.ENOENT:
335 raise
338 raise
336
339
337 self.version = v
340 self.version = v
338 self._inline = v & FLAG_INLINE_DATA
341 self._inline = v & FLAG_INLINE_DATA
339 self._generaldelta = v & FLAG_GENERALDELTA
342 self._generaldelta = v & FLAG_GENERALDELTA
340 flags = v & ~0xFFFF
343 flags = v & ~0xFFFF
341 fmt = v & 0xFFFF
344 fmt = v & 0xFFFF
342 if fmt == REVLOGV0:
345 if fmt == REVLOGV0:
343 if flags:
346 if flags:
344 raise RevlogError(_('unknown flags (%#04x) in version %d '
347 raise RevlogError(_('unknown flags (%#04x) in version %d '
345 'revlog %s') %
348 'revlog %s') %
346 (flags >> 16, fmt, self.indexfile))
349 (flags >> 16, fmt, self.indexfile))
347 elif fmt == REVLOGV1:
350 elif fmt == REVLOGV1:
348 if flags & ~REVLOGV1_FLAGS:
351 if flags & ~REVLOGV1_FLAGS:
349 raise RevlogError(_('unknown flags (%#04x) in version %d '
352 raise RevlogError(_('unknown flags (%#04x) in version %d '
350 'revlog %s') %
353 'revlog %s') %
351 (flags >> 16, fmt, self.indexfile))
354 (flags >> 16, fmt, self.indexfile))
352 elif fmt == REVLOGV2:
355 elif fmt == REVLOGV2:
353 if flags & ~REVLOGV2_FLAGS:
356 if flags & ~REVLOGV2_FLAGS:
354 raise RevlogError(_('unknown flags (%#04x) in version %d '
357 raise RevlogError(_('unknown flags (%#04x) in version %d '
355 'revlog %s') %
358 'revlog %s') %
356 (flags >> 16, fmt, self.indexfile))
359 (flags >> 16, fmt, self.indexfile))
357 else:
360 else:
358 raise RevlogError(_('unknown version (%d) in revlog %s') %
361 raise RevlogError(_('unknown version (%d) in revlog %s') %
359 (fmt, self.indexfile))
362 (fmt, self.indexfile))
360
363
361 self.storedeltachains = True
364 self.storedeltachains = True
362
365
363 self._io = revlogio()
366 self._io = revlogio()
364 if self.version == REVLOGV0:
367 if self.version == REVLOGV0:
365 self._io = revlogoldio()
368 self._io = revlogoldio()
366 try:
369 try:
367 d = self._io.parseindex(indexdata, self._inline)
370 d = self._io.parseindex(indexdata, self._inline)
368 except (ValueError, IndexError):
371 except (ValueError, IndexError):
369 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
372 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
370 self.index, nodemap, self._chunkcache = d
373 self.index, nodemap, self._chunkcache = d
371 if nodemap is not None:
374 if nodemap is not None:
372 self.nodemap = self._nodecache = nodemap
375 self.nodemap = self._nodecache = nodemap
373 if not self._chunkcache:
376 if not self._chunkcache:
374 self._chunkclear()
377 self._chunkclear()
375 # revnum -> (chain-length, sum-delta-length)
378 # revnum -> (chain-length, sum-delta-length)
376 self._chaininfocache = {}
379 self._chaininfocache = {}
377 # revlog header -> revlog compressor
380 # revlog header -> revlog compressor
378 self._decompressors = {}
381 self._decompressors = {}
379
382
380 @util.propertycache
383 @util.propertycache
381 def _compressor(self):
384 def _compressor(self):
382 return util.compengines[self._compengine].revlogcompressor()
385 return util.compengines[self._compengine].revlogcompressor()
383
386
384 def tip(self):
387 def tip(self):
385 return self.node(len(self.index) - 2)
388 return self.node(len(self.index) - 2)
386 def __contains__(self, rev):
389 def __contains__(self, rev):
387 return 0 <= rev < len(self)
390 return 0 <= rev < len(self)
388 def __len__(self):
391 def __len__(self):
389 return len(self.index) - 1
392 return len(self.index) - 1
390 def __iter__(self):
393 def __iter__(self):
391 return iter(xrange(len(self)))
394 return iter(xrange(len(self)))
392 def revs(self, start=0, stop=None):
395 def revs(self, start=0, stop=None):
393 """iterate over all rev in this revlog (from start to stop)"""
396 """iterate over all rev in this revlog (from start to stop)"""
394 step = 1
397 step = 1
395 if stop is not None:
398 if stop is not None:
396 if start > stop:
399 if start > stop:
397 step = -1
400 step = -1
398 stop += step
401 stop += step
399 else:
402 else:
400 stop = len(self)
403 stop = len(self)
401 return xrange(start, stop, step)
404 return xrange(start, stop, step)

    @util.propertycache
    def nodemap(self):
        self.rev(self.node(0))
        return self._nodecache

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def clearcaches(self):
        self._cache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, '')
        self._pcache = {}

        try:
            self._nodecache.clearcaches()
        except AttributeError:
            self._nodecache = {nullid: nullrev}
            self._nodepos = None

    def rev(self, node):
        try:
            return self._nodecache[node]
        except TypeError:
            raise
        except RevlogError:
            # parsers.c radix tree lookup failed
            if node == wdirid:
                raise error.WdirUnsupported
            raise LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            if node == wdirid:
                raise error.WdirUnsupported
            raise LookupError(node, self.indexfile, _('no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF
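
    # Editor's note: a hedged sketch of the first index-entry field, based on
    # the comment above: the top 48 bits hold the data offset and the low 16
    # bits hold the flags, so start() and flags() simply unpack that word.
    #
    #   entry0 = rl.index[rev][0]     # `rl` and `rev` are illustrative names
    #   offset = entry0 >> 16         # same value as rl.start(rev)
    #   flags = entry0 & 0xFFFF       # same value as rl.flags(rev)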

    def length(self, rev):
        return self.index[rev][1]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(rev, raw=True)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev, raw=False))

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            return self.index[rev][5:7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r
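
    # Editor's note: an illustrative sketch (not part of the original file) of
    # what _chaininfo() reports for a revision `rev` of a revlog `rl`:
    #
    #   clen, compressed = rl._chaininfo(rev)
    #   # clen is the number of deltas applied on top of the chain base, and
    #   # compressed is the on-disk size of the base plus those deltas;
    #   # rl.chainlen(rev) returns just the first element.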

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped
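
    # Editor's note: a hedged illustration of how a delta chain is consumed;
    # `rl` and `rev` are placeholders, and this mirrors the read path in
    # revision() further down rather than adding any new behaviour.
    #
    #   chain, stopped = rl._deltachain(rev)
    #   bins = rl._chunks(chain)                  # decompressed base + deltas
    #   text = mdiff.patches(bytes(bins[0]), bins[1:])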

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse topological order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
                                      inclusive=inclusive)

    def descendants(self, revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if not r in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

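    # Editor's note: a small usage sketch; `rl` is a changelog-like revlog and
    # `knownnodes`/`remoteheads` are illustrative lists of node IDs, not names
    # from this file.
    #
    #   missing = rl.findmissing(common=knownnodes, heads=remoteheads)
    #   # ancestors of remoteheads (inclusive) that are not ancestors of
    #   # knownnodes, sorted by revision number (topologically).
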
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev: # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self):
        try:
            return self.index.headrevs()
        except AttributeError:
            return self._headrevs()

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iter over filtered rev so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1 # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
        return [r for r, val in enumerate(ishead) if val]

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev}
        heads = {startrev}

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]
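
    # Editor's note: an illustrative sketch of heads(), with `rl` standing in
    # for a revlog instance and `somenode`/`cutnode` as placeholder node IDs.
    #
    #   rl.heads()                  # every node without children
    #   rl.heads(start=somenode)    # only heads descended from somenode
    #   rl.heads(stop=[cutnode])    # revs in stop are treated as if they had
    #                               # no children, per the docstring above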

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def descendant(self, start, end):
        if start == nullrev:
            return True
        for i in self.descendants([start]):
            if i == end:
                return True
            elif i > end:
                break
        return False

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.commonancestorsheads(a, b)
        except (AttributeError, OverflowError): # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return pycompat.maplist(self.node, ancs)

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        The implementation of this is trivial but the use of
        commonancestorsheads is not."""
        return a in self.commonancestorsheads(a, b)

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except (TypeError, binascii.Error):
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.
        """
        if df is not None:
            closehandle = False
        else:
            if self._inline:
                df = self.opener(self.indexfile)
            else:
                df = self.opener(self.datafile)
            closehandle = True

        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        df.seek(realoffset)
        d = df.read(reallength)
        if closehandle:
            df.close()
        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            return util.buffer(d, offset - realoffset, length)
        return d
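
    # Editor's note: a worked example of the cache-window arithmetic above,
    # assuming a 64KB chunk cache size; the numbers are illustrative only.
    #
    #   cachesize = 65536
    #   offset, length = 70000, 100
    #   realoffset = offset & ~(cachesize - 1)                     # 65536
    #   reallength = (((offset + length + cachesize)
    #                  & ~(cachesize - 1)) - realoffset)           # 65536
    #   # the read is rounded out to a cache-aligned window [65536, 131072)
    #   # and the requested 100-byte slice is returned from that buffer.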

    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)

    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        try:
            offset, data = self._getsegmentforrevs(revs[0], revs[-1], df=df)
        except OverflowError:
            # issue4215 - we can't cache a run of chunks greater than
            # 2G on Windows
            return [self._chunk(rev, df=df) for rev in revs]

        decomp = self.decompress
        for rev in revs:
            chunkstart = start(rev)
            if inline:
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l

    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.revision(rev1, raw=True),
                              self.revision(rev2, raw=True))

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        cachedrev = None
        flags = None
        rawtext = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                # _cache only stores rawtext
                if raw:
                    return self._cache[2]
                # duplicated, but good for perf
                if rev is None:
                    rev = self.rev(node)
                if flags is None:
                    flags = self.flags(rev)
                # no extra flags set, no flag processor runs, text = rawtext
                if flags == REVIDX_DEFAULT_FLAGS:
                    return self._cache[2]
                # rawtext is reusable. need to run flag processor
                rawtext = self._cache[2]

            cachedrev = self._cache[1]

        # look up what we need to read
        if rawtext is None:
            if rev is None:
                rev = self.rev(node)

            chain, stopped = self._deltachain(rev, stoprev=cachedrev)
            if stopped:
                rawtext = self._cache[2]

            # drop cache to save memory
            self._cache = None

            bins = self._chunks(chain, df=_df)
            if rawtext is None:
                rawtext = bytes(bins[0])
                bins = bins[1:]

            rawtext = mdiff.patches(rawtext, bins)
            self._cache = (node, rev, rawtext)

        if flags is None:
            if rev is None:
                rev = self.rev(node)
            flags = self.flags(rev)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text
1382
1385
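``revision`` keeps a single-entry ``(node, rev, rawtext)`` cache and only rebuilds the text from the delta chain on a miss; flag processors then run on the raw text unless ``raw=True`` was requested. A self-contained toy of the cache fast path at the top of the method (the tuple layout is the one used above, the helper name is made up):

    def cached_revision_sketch(cache, node, flags, default_flags=0):
        # cache is (node, rev, rawtext); the raw text can be returned as-is
        # only when no flag processor would need to run on it.
        if cache and cache[0] == node:
            if flags == default_flags:
                return cache[2]
        return None  # miss: caller falls through to the delta chain

    print(cached_revision_sketch(('n1', 3, b'hello'), 'n1', 0))  # b'hello'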
1383 def hash(self, text, p1, p2):
1386 def hash(self, text, p1, p2):
1384 """Compute a node hash.
1387 """Compute a node hash.
1385
1388
1386 Available as a function so that subclasses can replace the hash
1389 Available as a function so that subclasses can replace the hash
1387 as needed.
1390 as needed.
1388 """
1391 """
1389 return hash(text, p1, p2)
1392 return hash(text, p1, p2)
1390
1393
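The module-level ``hash`` that this method delegates to combines the two parent node ids, in sorted order, with the revision text, which is why a revision's identity changes when either its content or its parents change. A sketch consistent with that behaviour (SHA-1 over sorted parents plus text; treat it as illustrative rather than a byte-for-byte reimplementation):

    import hashlib

    def node_hash_sketch(text, p1, p2):
        # Parents are fed in sorted order so the result does not depend on
        # which parent happens to be listed first.
        s = hashlib.sha1(min(p1, p2))
        s.update(max(p1, p2))
        s.update(text)
        return s.hexdigest()

    nullid = b'\0' * 20
    print(node_hash_sketch(b'hello\n', nullid, nullid))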
1391 def _processflags(self, text, flags, operation, raw=False):
1394 def _processflags(self, text, flags, operation, raw=False):
1392 """Inspect revision data flags and applies transforms defined by
1395 """Inspect revision data flags and applies transforms defined by
1393 registered flag processors.
1396 registered flag processors.
1394
1397
1395 ``text`` - the revision data to process
1398 ``text`` - the revision data to process
1396 ``flags`` - the revision flags
1399 ``flags`` - the revision flags
1397 ``operation`` - the operation being performed (read or write)
1400 ``operation`` - the operation being performed (read or write)
1398 ``raw`` - an optional argument describing if the raw transform should be
1401 ``raw`` - an optional argument describing if the raw transform should be
1399 applied.
1402 applied.
1400
1403
1401 This method processes the flags in the order (or reverse order if
1404 This method processes the flags in the order (or reverse order if
1402 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
1405 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
1403 flag processors registered for present flags. The order of flags defined
1406 flag processors registered for present flags. The order of flags defined
1404 in REVIDX_FLAGS_ORDER needs to be stable because the transforms are not commutative.
1407 in REVIDX_FLAGS_ORDER needs to be stable because the transforms are not commutative.
1405
1408
1406 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
1409 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
1407 processed text and ``validatehash`` is a bool indicating whether the
1410 processed text and ``validatehash`` is a bool indicating whether the
1408 returned text should be checked for hash integrity.
1411 returned text should be checked for hash integrity.
1409
1412
1410 Note: If the ``raw`` argument is set, it has precedence over the
1413 Note: If the ``raw`` argument is set, it has precedence over the
1411 operation and will only update the value of ``validatehash``.
1414 operation and will only update the value of ``validatehash``.
1412 """
1415 """
1413 # fast path: no flag processors will run
1416 # fast path: no flag processors will run
1414 if flags == 0:
1417 if flags == 0:
1415 return text, True
1418 return text, True
1416 if operation not in ('read', 'write'):
1419 if operation not in ('read', 'write'):
1417 raise ProgrammingError(_("invalid '%s' operation ") % (operation))
1420 raise ProgrammingError(_("invalid '%s' operation ") % (operation))
1418 # Check all flags are known.
1421 # Check all flags are known.
1419 if flags & ~REVIDX_KNOWN_FLAGS:
1422 if flags & ~REVIDX_KNOWN_FLAGS:
1420 raise RevlogError(_("incompatible revision flag '%#x'") %
1423 raise RevlogError(_("incompatible revision flag '%#x'") %
1421 (flags & ~REVIDX_KNOWN_FLAGS))
1424 (flags & ~REVIDX_KNOWN_FLAGS))
1422 validatehash = True
1425 validatehash = True
1423 # Depending on the operation (read or write), the order might be
1426 # Depending on the operation (read or write), the order might be
1424 # reversed due to non-commutative transforms.
1427 # reversed due to non-commutative transforms.
1425 orderedflags = REVIDX_FLAGS_ORDER
1428 orderedflags = REVIDX_FLAGS_ORDER
1426 if operation == 'write':
1429 if operation == 'write':
1427 orderedflags = reversed(orderedflags)
1430 orderedflags = reversed(orderedflags)
1428
1431
1429 for flag in orderedflags:
1432 for flag in orderedflags:
1430 # If a flagprocessor has been registered for a known flag, apply the
1433 # If a flagprocessor has been registered for a known flag, apply the
1431 # related operation transform and update result tuple.
1434 # related operation transform and update result tuple.
1432 if flag & flags:
1435 if flag & flags:
1433 vhash = True
1436 vhash = True
1434
1437
1435 if flag not in _flagprocessors:
1438 if flag not in _flagprocessors:
1436 message = _("missing processor for flag '%#x'") % (flag)
1439 message = _("missing processor for flag '%#x'") % (flag)
1437 raise RevlogError(message)
1440 raise RevlogError(message)
1438
1441
1439 processor = _flagprocessors[flag]
1442 processor = _flagprocessors[flag]
1440 if processor is not None:
1443 if processor is not None:
1441 readtransform, writetransform, rawtransform = processor
1444 readtransform, writetransform, rawtransform = processor
1442
1445
1443 if raw:
1446 if raw:
1444 vhash = rawtransform(self, text)
1447 vhash = rawtransform(self, text)
1445 elif operation == 'read':
1448 elif operation == 'read':
1446 text, vhash = readtransform(self, text)
1449 text, vhash = readtransform(self, text)
1447 else: # write operation
1450 else: # write operation
1448 text, vhash = writetransform(self, text)
1451 text, vhash = writetransform(self, text)
1449 validatehash = validatehash and vhash
1452 validatehash = validatehash and vhash
1450
1453
1451 return text, validatehash
1454 return text, validatehash
1452
1455
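Each entry in the processor registry consulted by ``_processflags`` is a ``(readtransform, writetransform, rawtransform)`` triple; reads apply the transforms in ``REVIDX_FLAGS_ORDER``, writes apply them in reverse, and every transform also reports whether the produced text can still be hash-checked. A toy dispatcher with invented flag values and transforms (the real registry and flag constants live elsewhere in this module):

    FLAG_A = 1 << 0
    FLAG_B = 1 << 1
    FLAGS_ORDER = [FLAG_A, FLAG_B]

    processors = {
        # flag: (readtransform, writetransform, rawtransform)
        FLAG_A: (lambda t: (t.replace(b'a', b'A'), True),
                 lambda t: (t.replace(b'A', b'a'), True),
                 lambda t: True),
    }

    def processflags_sketch(text, flags, operation):
        validatehash = True
        order = FLAGS_ORDER if operation == 'read' else list(reversed(FLAGS_ORDER))
        for flag in order:
            if not (flag & flags):
                continue
            if flag not in processors:
                raise ValueError('missing processor for flag %#x' % flag)
            read, write, _raw = processors[flag]
            text, vhash = (read if operation == 'read' else write)(text)
            validatehash = validatehash and vhash
        return text, validatehash

    print(processflags_sketch(b'banana', FLAG_A, 'read'))  # (b'bAnAnA', True)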
1453 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1456 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1454 """Check node hash integrity.
1457 """Check node hash integrity.
1455
1458
1456 Available as a function so that subclasses can extend hash mismatch
1459 Available as a function so that subclasses can extend hash mismatch
1457 behaviors as needed.
1460 behaviors as needed.
1458 """
1461 """
1459 if p1 is None and p2 is None:
1462 if p1 is None and p2 is None:
1460 p1, p2 = self.parents(node)
1463 p1, p2 = self.parents(node)
1461 if node != self.hash(text, p1, p2):
1464 if node != self.hash(text, p1, p2):
1462 revornode = rev
1465 revornode = rev
1463 if revornode is None:
1466 if revornode is None:
1464 revornode = templatefilters.short(hex(node))
1467 revornode = templatefilters.short(hex(node))
1465 raise RevlogError(_("integrity check failed on %s:%s")
1468 raise RevlogError(_("integrity check failed on %s:%s")
1466 % (self.indexfile, revornode))
1469 % (self.indexfile, revornode))
1467
1470
1468 def checkinlinesize(self, tr, fp=None):
1471 def checkinlinesize(self, tr, fp=None):
1469 """Check if the revlog is too big for inline and convert if so.
1472 """Check if the revlog is too big for inline and convert if so.
1470
1473
1471 This should be called after revisions are added to the revlog. If the
1474 This should be called after revisions are added to the revlog. If the
1472 revlog has grown too large to be an inline revlog, it will convert it
1475 revlog has grown too large to be an inline revlog, it will convert it
1473 to use multiple index and data files.
1476 to use multiple index and data files.
1474 """
1477 """
1475 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1478 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1476 return
1479 return
1477
1480
1478 trinfo = tr.find(self.indexfile)
1481 trinfo = tr.find(self.indexfile)
1479 if trinfo is None:
1482 if trinfo is None:
1480 raise RevlogError(_("%s not found in the transaction")
1483 raise RevlogError(_("%s not found in the transaction")
1481 % self.indexfile)
1484 % self.indexfile)
1482
1485
1483 trindex = trinfo[2]
1486 trindex = trinfo[2]
1484 if trindex is not None:
1487 if trindex is not None:
1485 dataoff = self.start(trindex)
1488 dataoff = self.start(trindex)
1486 else:
1489 else:
1487 # revlog was stripped at start of transaction, use all leftover data
1490 # revlog was stripped at start of transaction, use all leftover data
1488 trindex = len(self) - 1
1491 trindex = len(self) - 1
1489 dataoff = self.end(-2)
1492 dataoff = self.end(-2)
1490
1493
1491 tr.add(self.datafile, dataoff)
1494 tr.add(self.datafile, dataoff)
1492
1495
1493 if fp:
1496 if fp:
1494 fp.flush()
1497 fp.flush()
1495 fp.close()
1498 fp.close()
1496
1499
1497 df = self.opener(self.datafile, 'w')
1500 df = self.opener(self.datafile, 'w')
1498 try:
1501 try:
1499 for r in self:
1502 for r in self:
1500 df.write(self._getsegmentforrevs(r, r)[1])
1503 df.write(self._getsegmentforrevs(r, r)[1])
1501 finally:
1504 finally:
1502 df.close()
1505 df.close()
1503
1506
1504 fp = self.opener(self.indexfile, 'w', atomictemp=True,
1507 fp = self.opener(self.indexfile, 'w', atomictemp=True,
1505 checkambig=self._checkambig)
1508 checkambig=self._checkambig)
1506 self.version &= ~FLAG_INLINE_DATA
1509 self.version &= ~FLAG_INLINE_DATA
1507 self._inline = False
1510 self._inline = False
1508 for i in self:
1511 for i in self:
1509 e = self._io.packentry(self.index[i], self.node, self.version, i)
1512 e = self._io.packentry(self.index[i], self.node, self.version, i)
1510 fp.write(e)
1513 fp.write(e)
1511
1514
1512 # if we don't call close, the temp file will never replace the
1515 # if we don't call close, the temp file will never replace the
1513 # real index
1516 # real index
1514 fp.close()
1517 fp.close()
1515
1518
1516 tr.replace(self.indexfile, trindex * self._io.size)
1519 tr.replace(self.indexfile, trindex * self._io.size)
1517 self._chunkclear()
1520 self._chunkclear()
1518
1521
1519 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1522 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1520 node=None, flags=REVIDX_DEFAULT_FLAGS):
1523 node=None, flags=REVIDX_DEFAULT_FLAGS):
1521 """add a revision to the log
1524 """add a revision to the log
1522
1525
1523 text - the revision data to add
1526 text - the revision data to add
1524 transaction - the transaction object used for rollback
1527 transaction - the transaction object used for rollback
1525 link - the linkrev data to add
1528 link - the linkrev data to add
1526 p1, p2 - the parent nodeids of the revision
1529 p1, p2 - the parent nodeids of the revision
1527 cachedelta - an optional precomputed delta
1530 cachedelta - an optional precomputed delta
1528 node - nodeid of revision; typically node is not specified, and it is
1531 node - nodeid of revision; typically node is not specified, and it is
1529 computed by default as hash(text, p1, p2), however subclasses might
1532 computed by default as hash(text, p1, p2), however subclasses might
1530 use a different hashing method (and override checkhash() in that case)
1533 use a different hashing method (and override checkhash() in that case)
1531 flags - the known flags to set on the revision
1534 flags - the known flags to set on the revision
1532 """
1535 """
1533 if link == nullrev:
1536 if link == nullrev:
1534 raise RevlogError(_("attempted to add linkrev -1 to %s")
1537 raise RevlogError(_("attempted to add linkrev -1 to %s")
1535 % self.indexfile)
1538 % self.indexfile)
1536
1539
1537 if flags:
1540 if flags:
1538 node = node or self.hash(text, p1, p2)
1541 node = node or self.hash(text, p1, p2)
1539
1542
1540 rawtext, validatehash = self._processflags(text, flags, 'write')
1543 rawtext, validatehash = self._processflags(text, flags, 'write')
1541
1544
1542 # If the flag processor modifies the revision data, ignore any provided
1545 # If the flag processor modifies the revision data, ignore any provided
1543 # cachedelta.
1546 # cachedelta.
1544 if rawtext != text:
1547 if rawtext != text:
1545 cachedelta = None
1548 cachedelta = None
1546
1549
1547 if len(rawtext) > _maxentrysize:
1550 if len(rawtext) > _maxentrysize:
1548 raise RevlogError(
1551 raise RevlogError(
1549 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1552 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1550 % (self.indexfile, len(rawtext)))
1553 % (self.indexfile, len(rawtext)))
1551
1554
1552 node = node or self.hash(rawtext, p1, p2)
1555 node = node or self.hash(rawtext, p1, p2)
1553 if node in self.nodemap:
1556 if node in self.nodemap:
1554 return node
1557 return node
1555
1558
1556 if validatehash:
1559 if validatehash:
1557 self.checkhash(rawtext, node, p1=p1, p2=p2)
1560 self.checkhash(rawtext, node, p1=p1, p2=p2)
1558
1561
1559 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1562 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1560 flags, cachedelta=cachedelta)
1563 flags, cachedelta=cachedelta)
1561
1564
1562 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1565 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1563 cachedelta=None):
1566 cachedelta=None):
1564 """add a raw revision with known flags, node and parents
1567 """add a raw revision with known flags, node and parents
1565 useful when reusing a revision not stored in this revlog (e.g. received
1568 useful when reusing a revision not stored in this revlog (e.g. received
1566 over the wire, or read from an external bundle).
1569 over the wire, or read from an external bundle).
1567 """
1570 """
1568 dfh = None
1571 dfh = None
1569 if not self._inline:
1572 if not self._inline:
1570 dfh = self.opener(self.datafile, "a+")
1573 dfh = self.opener(self.datafile, "a+")
1571 ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
1574 ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
1572 try:
1575 try:
1573 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1576 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1574 flags, cachedelta, ifh, dfh)
1577 flags, cachedelta, ifh, dfh)
1575 finally:
1578 finally:
1576 if dfh:
1579 if dfh:
1577 dfh.close()
1580 dfh.close()
1578 ifh.close()
1581 ifh.close()
1579
1582
1580 def compress(self, data):
1583 def compress(self, data):
1581 """Generate a possibly-compressed representation of data."""
1584 """Generate a possibly-compressed representation of data."""
1582 if not data:
1585 if not data:
1583 return '', data
1586 return '', data
1584
1587
1585 compressed = self._compressor.compress(data)
1588 compressed = self._compressor.compress(data)
1586
1589
1587 if compressed:
1590 if compressed:
1588 # The revlog compressor added the header in the returned data.
1591 # The revlog compressor added the header in the returned data.
1589 return '', compressed
1592 return '', compressed
1590
1593
1591 if data[0:1] == '\0':
1594 if data[0:1] == '\0':
1592 return '', data
1595 return '', data
1593 return 'u', data
1596 return 'u', data
1594
1597
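The ``(header, data)`` pair returned by ``compress`` encodes three cases: an empty header when the compression engine already wrote its own header into ``data`` ('x' for zlib), an empty header for chunks starting with '\0', which are unambiguous as stored, and a 'u' marker for chunks kept literal because compressing them did not pay off. A simplified sketch using zlib directly and a plain size comparison in place of the engine's own decision:

    import zlib

    def compress_sketch(data):
        if not data:
            return b'', data
        compressed = zlib.compress(data)
        if len(compressed) < len(data):
            return b'', compressed          # zlib output already starts with 'x'
        if data[0:1] == b'\0':
            return b'', data                # NUL-prefixed chunks are unambiguous
        return b'u', data                   # stored literally, marked with 'u'

    print(compress_sketch(b'abc'))          # too short to compress: (b'u', b'abc')
    print(compress_sketch(b'a' * 1000)[0])  # compressible: empty header, b''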
1595 def decompress(self, data):
1598 def decompress(self, data):
1596 """Decompress a revlog chunk.
1599 """Decompress a revlog chunk.
1597
1600
1598 The chunk is expected to begin with a header identifying the
1601 The chunk is expected to begin with a header identifying the
1599 format type so it can be routed to an appropriate decompressor.
1602 format type so it can be routed to an appropriate decompressor.
1600 """
1603 """
1601 if not data:
1604 if not data:
1602 return data
1605 return data
1603
1606
1604 # Revlogs are read much more frequently than they are written and many
1607 # Revlogs are read much more frequently than they are written and many
1605 # chunks only take microseconds to decompress, so performance is
1608 # chunks only take microseconds to decompress, so performance is
1606 # important here.
1609 # important here.
1607 #
1610 #
1608 # We can make a few assumptions about revlogs:
1611 # We can make a few assumptions about revlogs:
1609 #
1612 #
1610 # 1) the majority of chunks will be compressed (as opposed to inline
1613 # 1) the majority of chunks will be compressed (as opposed to inline
1611 # raw data).
1614 # raw data).
1612 # 2) decompressing *any* data will likely be at least 10x slower than
1615 # 2) decompressing *any* data will likely be at least 10x slower than
1613 # returning raw inline data.
1616 # returning raw inline data.
1614 # 3) we want to prioritize common and officially supported compression
1617 # 3) we want to prioritize common and officially supported compression
1615 # engines
1618 # engines
1616 #
1619 #
1617 # It follows that we want to optimize for "decompress compressed data
1620 # It follows that we want to optimize for "decompress compressed data
1618 # when encoded with common and officially supported compression engines"
1621 # when encoded with common and officially supported compression engines"
1619 # case over "raw data" and "data encoded by less common or non-official
1622 # case over "raw data" and "data encoded by less common or non-official
1620 # compression engines." That is why we have the inline lookup first
1623 # compression engines." That is why we have the inline lookup first
1621 # followed by the compengines lookup.
1624 # followed by the compengines lookup.
1622 #
1625 #
1623 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1626 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1624 # compressed chunks. And this matters for changelog and manifest reads.
1627 # compressed chunks. And this matters for changelog and manifest reads.
1625 t = data[0:1]
1628 t = data[0:1]
1626
1629
1627 if t == 'x':
1630 if t == 'x':
1628 try:
1631 try:
1629 return _zlibdecompress(data)
1632 return _zlibdecompress(data)
1630 except zlib.error as e:
1633 except zlib.error as e:
1631 raise RevlogError(_('revlog decompress error: %s') % str(e))
1634 raise RevlogError(_('revlog decompress error: %s') % str(e))
1632 # '\0' is more common than 'u' so it goes first.
1635 # '\0' is more common than 'u' so it goes first.
1633 elif t == '\0':
1636 elif t == '\0':
1634 return data
1637 return data
1635 elif t == 'u':
1638 elif t == 'u':
1636 return util.buffer(data, 1)
1639 return util.buffer(data, 1)
1637
1640
1638 try:
1641 try:
1639 compressor = self._decompressors[t]
1642 compressor = self._decompressors[t]
1640 except KeyError:
1643 except KeyError:
1641 try:
1644 try:
1642 engine = util.compengines.forrevlogheader(t)
1645 engine = util.compengines.forrevlogheader(t)
1643 compressor = engine.revlogcompressor()
1646 compressor = engine.revlogcompressor()
1644 self._decompressors[t] = compressor
1647 self._decompressors[t] = compressor
1645 except KeyError:
1648 except KeyError:
1646 raise RevlogError(_('unknown compression type %r') % t)
1649 raise RevlogError(_('unknown compression type %r') % t)
1647
1650
1648 return compressor.decompress(data)
1651 return compressor.decompress(data)
1649
1652
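The first byte of every chunk is enough to route it: 'x' goes to zlib, '\0' means literal data that legitimately starts with NUL, 'u' means literal data with the marker stripped, and any other byte is resolved through the compression-engine registry (zstd chunks, for example, carry their own header byte). A toy dispatcher mirroring that order, with the registry reduced to a plain dict argument:

    import zlib

    def decompress_sketch(data, engines=None):
        if not data:
            return data
        t = data[0:1]
        if t == b'x':
            return zlib.decompress(data)
        if t == b'\0':
            return data
        if t == b'u':
            return data[1:]
        try:
            return (engines or {})[t](data)
        except KeyError:
            raise ValueError('unknown compression type %r' % t)

    print(decompress_sketch(zlib.compress(b'hello')))  # b'hello'
    print(decompress_sketch(b'uhello'))                # b'hello'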
1650 def _isgooddelta(self, d, textlen):
1653 def _isgooddelta(self, d, textlen):
1651 """Returns True if the given delta is good. Good means that it is within
1654 """Returns True if the given delta is good. Good means that it is within
1652 the disk span, disk size, and chain length bounds that we know to be
1655 the disk span, disk size, and chain length bounds that we know to be
1653 performant."""
1656 performant."""
1654 if d is None:
1657 if d is None:
1655 return False
1658 return False
1656
1659
1657 # - 'dist' is the distance from the base revision -- bounding it limits
1660 # - 'dist' is the distance from the base revision -- bounding it limits
1658 # the amount of I/O we need to do.
1661 # the amount of I/O we need to do.
1659 # - 'compresseddeltalen' is the sum of the total size of deltas we need
1662 # - 'compresseddeltalen' is the sum of the total size of deltas we need
1660 # to apply -- bounding it limits the amount of CPU we consume.
1663 # to apply -- bounding it limits the amount of CPU we consume.
1661 dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
1664 dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
1662 if (dist > textlen * 4 or l > textlen or
1665
1666 defaultmax = textlen * 4
1667 maxdist = self._maxdeltachainspan
1668 if not maxdist:
1669 maxdist = dist # ensure the conditional below passes
1670 maxdist = max(maxdist, defaultmax)
1671 if (dist > maxdist or l > textlen or
1663 compresseddeltalen > textlen * 2 or
1672 compresseddeltalen > textlen * 2 or
1664 (self._maxchainlen and chainlen > self._maxchainlen)):
1673 (self._maxchainlen and chainlen > self._maxchainlen)):
1665 return False
1674 return False
1666
1675
1667 return True
1676 return True
1668
1677
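The lines added by this changeset turn the hard ``textlen * 4`` span limit into a configurable ceiling: the new ``_maxdeltachainspan`` attribute (presumably backed by the experimental option the commit message refers to) caps how far back on disk a chain may reach, a falsy value disables the span check, and the effective bound is never tighter than the old ``4 * textlen`` default. A standalone restatement of the whole predicate, with the bounds spelled out one by one:

    def isgooddelta_sketch(dist, deltalen, textlen, compresseddeltalen,
                           chainlen, maxchainlen=0, maxdeltachainspan=0):
        # Span bound (the part changed here): distance from the chain base
        # may not exceed max(maxdeltachainspan, 4 * textlen); a falsy
        # maxdeltachainspan makes this comparison always pass.
        defaultmax = textlen * 4
        maxdist = maxdeltachainspan
        if not maxdist:
            maxdist = dist
        maxdist = max(maxdist, defaultmax)
        if dist > maxdist:
            return False
        # Size bounds: the delta and the whole compressed chain must stay
        # proportional to the uncompressed text.
        if deltalen > textlen or compresseddeltalen > textlen * 2:
            return False
        # Chain-length bound, only when configured.
        if maxchainlen and chainlen > maxchainlen:
            return False
        return True

    # With a 1 MiB span ceiling, a delta 2 MiB away from its base for a tiny
    # text is rejected even though every other bound is satisfied.
    print(isgooddelta_sketch(dist=2 << 20, deltalen=10, textlen=100,
                             compresseddeltalen=50, chainlen=3,
                             maxdeltachainspan=1 << 20))  # False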
1669 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1678 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1670 cachedelta, ifh, dfh, alwayscache=False):
1679 cachedelta, ifh, dfh, alwayscache=False):
1671 """internal function to add revisions to the log
1680 """internal function to add revisions to the log
1672
1681
1673 see addrevision for argument descriptions.
1682 see addrevision for argument descriptions.
1674
1683
1675 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1684 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1676
1685
1677 invariants:
1686 invariants:
1678 - rawtext is optional (can be None); if not set, cachedelta must be set.
1687 - rawtext is optional (can be None); if not set, cachedelta must be set.
1679 if both are set, they must correspond to each other.
1688 if both are set, they must correspond to each other.
1680 """
1689 """
1681 btext = [rawtext]
1690 btext = [rawtext]
1682 def buildtext():
1691 def buildtext():
1683 if btext[0] is not None:
1692 if btext[0] is not None:
1684 return btext[0]
1693 return btext[0]
1685 baserev = cachedelta[0]
1694 baserev = cachedelta[0]
1686 delta = cachedelta[1]
1695 delta = cachedelta[1]
1687 # special case deltas which replace entire base; no need to decode
1696 # special case deltas which replace entire base; no need to decode
1688 # base revision. this neatly avoids censored bases, which throw when
1697 # base revision. this neatly avoids censored bases, which throw when
1689 # they're decoded.
1698 # they're decoded.
1690 hlen = struct.calcsize(">lll")
1699 hlen = struct.calcsize(">lll")
1691 if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
1700 if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
1692 len(delta) - hlen):
1701 len(delta) - hlen):
1693 btext[0] = delta[hlen:]
1702 btext[0] = delta[hlen:]
1694 else:
1703 else:
1695 if self._inline:
1704 if self._inline:
1696 fh = ifh
1705 fh = ifh
1697 else:
1706 else:
1698 fh = dfh
1707 fh = dfh
1699 basetext = self.revision(baserev, _df=fh, raw=True)
1708 basetext = self.revision(baserev, _df=fh, raw=True)
1700 btext[0] = mdiff.patch(basetext, delta)
1709 btext[0] = mdiff.patch(basetext, delta)
1701
1710
1702 try:
1711 try:
1703 res = self._processflags(btext[0], flags, 'read', raw=True)
1712 res = self._processflags(btext[0], flags, 'read', raw=True)
1704 btext[0], validatehash = res
1713 btext[0], validatehash = res
1705 if validatehash:
1714 if validatehash:
1706 self.checkhash(btext[0], node, p1=p1, p2=p2)
1715 self.checkhash(btext[0], node, p1=p1, p2=p2)
1707 if flags & REVIDX_ISCENSORED:
1716 if flags & REVIDX_ISCENSORED:
1708 raise RevlogError(_('node %s is not censored') % node)
1717 raise RevlogError(_('node %s is not censored') % node)
1709 except CensoredNodeError:
1718 except CensoredNodeError:
1710 # must pass the censored index flag to add censored revisions
1719 # must pass the censored index flag to add censored revisions
1711 if not flags & REVIDX_ISCENSORED:
1720 if not flags & REVIDX_ISCENSORED:
1712 raise
1721 raise
1713 return btext[0]
1722 return btext[0]
1714
1723
1715 def builddelta(rev):
1724 def builddelta(rev):
1716 # can we use the cached delta?
1725 # can we use the cached delta?
1717 if cachedelta and cachedelta[0] == rev:
1726 if cachedelta and cachedelta[0] == rev:
1718 delta = cachedelta[1]
1727 delta = cachedelta[1]
1719 else:
1728 else:
1720 t = buildtext()
1729 t = buildtext()
1721 if self.iscensored(rev):
1730 if self.iscensored(rev):
1722 # deltas based on a censored revision must replace the
1731 # deltas based on a censored revision must replace the
1723 # full content in one patch, so delta works everywhere
1732 # full content in one patch, so delta works everywhere
1724 header = mdiff.replacediffheader(self.rawsize(rev), len(t))
1733 header = mdiff.replacediffheader(self.rawsize(rev), len(t))
1725 delta = header + t
1734 delta = header + t
1726 else:
1735 else:
1727 if self._inline:
1736 if self._inline:
1728 fh = ifh
1737 fh = ifh
1729 else:
1738 else:
1730 fh = dfh
1739 fh = dfh
1731 ptext = self.revision(rev, _df=fh, raw=True)
1740 ptext = self.revision(rev, _df=fh, raw=True)
1732 delta = mdiff.textdiff(ptext, t)
1741 delta = mdiff.textdiff(ptext, t)
1733 header, data = self.compress(delta)
1742 header, data = self.compress(delta)
1734 deltalen = len(header) + len(data)
1743 deltalen = len(header) + len(data)
1735 chainbase = self.chainbase(rev)
1744 chainbase = self.chainbase(rev)
1736 dist = deltalen + offset - self.start(chainbase)
1745 dist = deltalen + offset - self.start(chainbase)
1737 if self._generaldelta:
1746 if self._generaldelta:
1738 base = rev
1747 base = rev
1739 else:
1748 else:
1740 base = chainbase
1749 base = chainbase
1741 chainlen, compresseddeltalen = self._chaininfo(rev)
1750 chainlen, compresseddeltalen = self._chaininfo(rev)
1742 chainlen += 1
1751 chainlen += 1
1743 compresseddeltalen += deltalen
1752 compresseddeltalen += deltalen
1744 return (dist, deltalen, (header, data), base,
1753 return (dist, deltalen, (header, data), base,
1745 chainbase, chainlen, compresseddeltalen)
1754 chainbase, chainlen, compresseddeltalen)
1746
1755
1747 curr = len(self)
1756 curr = len(self)
1748 prev = curr - 1
1757 prev = curr - 1
1749 offset = self.end(prev)
1758 offset = self.end(prev)
1750 delta = None
1759 delta = None
1751 p1r, p2r = self.rev(p1), self.rev(p2)
1760 p1r, p2r = self.rev(p1), self.rev(p2)
1752
1761
1753 # full versions are inserted when the needed deltas
1762 # full versions are inserted when the needed deltas
1754 # become comparable to the uncompressed text
1763 # become comparable to the uncompressed text
1755 if rawtext is None:
1764 if rawtext is None:
1756 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1765 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1757 cachedelta[1])
1766 cachedelta[1])
1758 else:
1767 else:
1759 textlen = len(rawtext)
1768 textlen = len(rawtext)
1760
1769
1761 # should we try to build a delta?
1770 # should we try to build a delta?
1762 if prev != nullrev and self.storedeltachains:
1771 if prev != nullrev and self.storedeltachains:
1763 tested = set()
1772 tested = set()
1764 # This condition is true most of the time when processing
1773 # This condition is true most of the time when processing
1765 # changegroup data into a generaldelta repo. The only time it
1774 # changegroup data into a generaldelta repo. The only time it
1766 # isn't true is if this is the first revision in a delta chain
1775 # isn't true is if this is the first revision in a delta chain
1767 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
1776 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
1768 if cachedelta and self._generaldelta and self._lazydeltabase:
1777 if cachedelta and self._generaldelta and self._lazydeltabase:
1769 # Assume what we received from the server is a good choice
1778 # Assume what we received from the server is a good choice
1770 # build delta will reuse the cache
1779 # build delta will reuse the cache
1771 candidatedelta = builddelta(cachedelta[0])
1780 candidatedelta = builddelta(cachedelta[0])
1772 tested.add(cachedelta[0])
1781 tested.add(cachedelta[0])
1773 if self._isgooddelta(candidatedelta, textlen):
1782 if self._isgooddelta(candidatedelta, textlen):
1774 delta = candidatedelta
1783 delta = candidatedelta
1775 if delta is None and self._generaldelta:
1784 if delta is None and self._generaldelta:
1776 # exclude already lazy tested base if any
1785 # exclude already lazy tested base if any
1777 parents = [p for p in (p1r, p2r)
1786 parents = [p for p in (p1r, p2r)
1778 if p != nullrev and p not in tested]
1787 if p != nullrev and p not in tested]
1779 if parents and not self._aggressivemergedeltas:
1788 if parents and not self._aggressivemergedeltas:
1780 # Pick whichever parent is closer to us (to minimize the
1789 # Pick whichever parent is closer to us (to minimize the
1781 # chance of having to build a fulltext).
1790 # chance of having to build a fulltext).
1782 parents = [max(parents)]
1791 parents = [max(parents)]
1783 tested.update(parents)
1792 tested.update(parents)
1784 pdeltas = []
1793 pdeltas = []
1785 for p in parents:
1794 for p in parents:
1786 pd = builddelta(p)
1795 pd = builddelta(p)
1787 if self._isgooddelta(pd, textlen):
1796 if self._isgooddelta(pd, textlen):
1788 pdeltas.append(pd)
1797 pdeltas.append(pd)
1789 if pdeltas:
1798 if pdeltas:
1790 delta = min(pdeltas, key=lambda x: x[1])
1799 delta = min(pdeltas, key=lambda x: x[1])
1791 if delta is None and prev not in tested:
1800 if delta is None and prev not in tested:
1792 # other approaches failed, try against prev to hopefully save us a
1801 # other approaches failed, try against prev to hopefully save us a
1793 # fulltext.
1802 # fulltext.
1794 candidatedelta = builddelta(prev)
1803 candidatedelta = builddelta(prev)
1795 if self._isgooddelta(candidatedelta, textlen):
1804 if self._isgooddelta(candidatedelta, textlen):
1796 delta = candidatedelta
1805 delta = candidatedelta
1797 if delta is not None:
1806 if delta is not None:
1798 dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
1807 dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
1799 else:
1808 else:
1800 rawtext = buildtext()
1809 rawtext = buildtext()
1801 data = self.compress(rawtext)
1810 data = self.compress(rawtext)
1802 l = len(data[1]) + len(data[0])
1811 l = len(data[1]) + len(data[0])
1803 base = chainbase = curr
1812 base = chainbase = curr
1804
1813
1805 e = (offset_type(offset, flags), l, textlen,
1814 e = (offset_type(offset, flags), l, textlen,
1806 base, link, p1r, p2r, node)
1815 base, link, p1r, p2r, node)
1807 self.index.insert(-1, e)
1816 self.index.insert(-1, e)
1808 self.nodemap[node] = curr
1817 self.nodemap[node] = curr
1809
1818
1810 entry = self._io.packentry(e, self.node, self.version, curr)
1819 entry = self._io.packentry(e, self.node, self.version, curr)
1811 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1820 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1812
1821
1813 if alwayscache and rawtext is None:
1822 if alwayscache and rawtext is None:
1814 rawtext = buildtext()
1823 rawtext = buildtext()
1815
1824
1816 if type(rawtext) == str: # only accept immutable objects
1825 if type(rawtext) == str: # only accept immutable objects
1817 self._cache = (node, curr, rawtext)
1826 self._cache = (node, curr, rawtext)
1818 self._chainbasecache[curr] = chainbase
1827 self._chainbasecache[curr] = chainbase
1819 return node
1828 return node
1820
1829
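The delta-selection block above tries candidate bases in a fixed order: the base suggested by the incoming changegroup (when lazydeltabase applies), then the closer parent, or both parents when aggressive merge deltas are enabled, then the previous revision, and finally it gives up and stores a full text. A condensed sketch of that ordering, where ``build`` and ``isgood`` stand in for ``builddelta`` and ``_isgooddelta`` and each candidate uses the tuple layout from above (delta length at index 1):

    def choosedelta_sketch(build, isgood, cachedbase, parents, prev,
                           aggressive=False, nullrev=-1):
        tested = set()
        # 1) trust the delta base the peer sent, if any
        if cachedbase is not None:
            tested.add(cachedbase)
            d = build(cachedbase)
            if isgood(d):
                return d
        # 2) try parents: only the closest one unless aggressive is set
        cands = [p for p in parents if p != nullrev and p not in tested]
        if cands and not aggressive:
            cands = [max(cands)]
        tested.update(cands)
        good = [d for d in (build(p) for p in cands) if isgood(d)]
        if good:
            return min(good, key=lambda d: d[1])  # smallest delta wins
        # 3) last resort before a full snapshot: delta against the previous rev
        if prev not in tested:
            d = build(prev)
            if isgood(d):
                return d
        return None  # caller stores the full text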
1821 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1830 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1822 # Files opened in a+ mode have inconsistent behavior on various
1831 # Files opened in a+ mode have inconsistent behavior on various
1823 # platforms. Windows requires that a file positioning call be made
1832 # platforms. Windows requires that a file positioning call be made
1824 # when the file handle transitions between reads and writes. See
1833 # when the file handle transitions between reads and writes. See
1825 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
1834 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
1826 # platforms, Python or the platform itself can be buggy. Some versions
1835 # platforms, Python or the platform itself can be buggy. Some versions
1827 # of Solaris have been observed to not append at the end of the file
1836 # of Solaris have been observed to not append at the end of the file
1828 # if the file was seeked to before the end. See issue4943 for more.
1837 # if the file was seeked to before the end. See issue4943 for more.
1829 #
1838 #
1830 # We work around this issue by inserting a seek() before writing.
1839 # We work around this issue by inserting a seek() before writing.
1831 # Note: This is likely not necessary on Python 3.
1840 # Note: This is likely not necessary on Python 3.
1832 ifh.seek(0, os.SEEK_END)
1841 ifh.seek(0, os.SEEK_END)
1833 if dfh:
1842 if dfh:
1834 dfh.seek(0, os.SEEK_END)
1843 dfh.seek(0, os.SEEK_END)
1835
1844
1836 curr = len(self) - 1
1845 curr = len(self) - 1
1837 if not self._inline:
1846 if not self._inline:
1838 transaction.add(self.datafile, offset)
1847 transaction.add(self.datafile, offset)
1839 transaction.add(self.indexfile, curr * len(entry))
1848 transaction.add(self.indexfile, curr * len(entry))
1840 if data[0]:
1849 if data[0]:
1841 dfh.write(data[0])
1850 dfh.write(data[0])
1842 dfh.write(data[1])
1851 dfh.write(data[1])
1843 ifh.write(entry)
1852 ifh.write(entry)
1844 else:
1853 else:
1845 offset += curr * self._io.size
1854 offset += curr * self._io.size
1846 transaction.add(self.indexfile, offset, curr)
1855 transaction.add(self.indexfile, offset, curr)
1847 ifh.write(entry)
1856 ifh.write(entry)
1848 ifh.write(data[0])
1857 ifh.write(data[0])
1849 ifh.write(data[1])
1858 ifh.write(data[1])
1850 self.checkinlinesize(transaction, ifh)
1859 self.checkinlinesize(transaction, ifh)
1851
1860
1852 def addgroup(self, cg, linkmapper, transaction, addrevisioncb=None):
1861 def addgroup(self, cg, linkmapper, transaction, addrevisioncb=None):
1853 """
1862 """
1854 add a delta group
1863 add a delta group
1855
1864
1856 given a set of deltas, add them to the revision log. the
1865 given a set of deltas, add them to the revision log. the
1857 first delta is against its parent, which should be in our
1866 first delta is against its parent, which should be in our
1858 log, the rest are against the previous delta.
1867 log, the rest are against the previous delta.
1859
1868
1860 If ``addrevisioncb`` is defined, it will be called with arguments of
1869 If ``addrevisioncb`` is defined, it will be called with arguments of
1861 this revlog and the node that was added.
1870 this revlog and the node that was added.
1862 """
1871 """
1863
1872
1864 nodes = []
1873 nodes = []
1865
1874
1866 r = len(self)
1875 r = len(self)
1867 end = 0
1876 end = 0
1868 if r:
1877 if r:
1869 end = self.end(r - 1)
1878 end = self.end(r - 1)
1870 ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
1879 ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
1871 isize = r * self._io.size
1880 isize = r * self._io.size
1872 if self._inline:
1881 if self._inline:
1873 transaction.add(self.indexfile, end + isize, r)
1882 transaction.add(self.indexfile, end + isize, r)
1874 dfh = None
1883 dfh = None
1875 else:
1884 else:
1876 transaction.add(self.indexfile, isize, r)
1885 transaction.add(self.indexfile, isize, r)
1877 transaction.add(self.datafile, end)
1886 transaction.add(self.datafile, end)
1878 dfh = self.opener(self.datafile, "a+")
1887 dfh = self.opener(self.datafile, "a+")
1879 def flush():
1888 def flush():
1880 if dfh:
1889 if dfh:
1881 dfh.flush()
1890 dfh.flush()
1882 ifh.flush()
1891 ifh.flush()
1883 try:
1892 try:
1884 # loop through our set of deltas
1893 # loop through our set of deltas
1885 chain = None
1894 chain = None
1886 for chunkdata in iter(lambda: cg.deltachunk(chain), {}):
1895 for chunkdata in iter(lambda: cg.deltachunk(chain), {}):
1887 node = chunkdata['node']
1896 node = chunkdata['node']
1888 p1 = chunkdata['p1']
1897 p1 = chunkdata['p1']
1889 p2 = chunkdata['p2']
1898 p2 = chunkdata['p2']
1890 cs = chunkdata['cs']
1899 cs = chunkdata['cs']
1891 deltabase = chunkdata['deltabase']
1900 deltabase = chunkdata['deltabase']
1892 delta = chunkdata['delta']
1901 delta = chunkdata['delta']
1893 flags = chunkdata['flags'] or REVIDX_DEFAULT_FLAGS
1902 flags = chunkdata['flags'] or REVIDX_DEFAULT_FLAGS
1894
1903
1895 nodes.append(node)
1904 nodes.append(node)
1896
1905
1897 link = linkmapper(cs)
1906 link = linkmapper(cs)
1898 if node in self.nodemap:
1907 if node in self.nodemap:
1899 # this can happen if two branches make the same change
1908 # this can happen if two branches make the same change
1900 chain = node
1909 chain = node
1901 continue
1910 continue
1902
1911
1903 for p in (p1, p2):
1912 for p in (p1, p2):
1904 if p not in self.nodemap:
1913 if p not in self.nodemap:
1905 raise LookupError(p, self.indexfile,
1914 raise LookupError(p, self.indexfile,
1906 _('unknown parent'))
1915 _('unknown parent'))
1907
1916
1908 if deltabase not in self.nodemap:
1917 if deltabase not in self.nodemap:
1909 raise LookupError(deltabase, self.indexfile,
1918 raise LookupError(deltabase, self.indexfile,
1910 _('unknown delta base'))
1919 _('unknown delta base'))
1911
1920
1912 baserev = self.rev(deltabase)
1921 baserev = self.rev(deltabase)
1913
1922
1914 if baserev != nullrev and self.iscensored(baserev):
1923 if baserev != nullrev and self.iscensored(baserev):
1915 # if base is censored, delta must be full replacement in a
1924 # if base is censored, delta must be full replacement in a
1916 # single patch operation
1925 # single patch operation
1917 hlen = struct.calcsize(">lll")
1926 hlen = struct.calcsize(">lll")
1918 oldlen = self.rawsize(baserev)
1927 oldlen = self.rawsize(baserev)
1919 newlen = len(delta) - hlen
1928 newlen = len(delta) - hlen
1920 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
1929 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
1921 raise error.CensoredBaseError(self.indexfile,
1930 raise error.CensoredBaseError(self.indexfile,
1922 self.node(baserev))
1931 self.node(baserev))
1923
1932
1924 if not flags and self._peek_iscensored(baserev, delta, flush):
1933 if not flags and self._peek_iscensored(baserev, delta, flush):
1925 flags |= REVIDX_ISCENSORED
1934 flags |= REVIDX_ISCENSORED
1926
1935
1927 # We assume consumers of addrevisioncb will want to retrieve
1936 # We assume consumers of addrevisioncb will want to retrieve
1928 # the added revision, which will require a call to
1937 # the added revision, which will require a call to
1929 # revision(). revision() will fast path if there is a cache
1938 # revision(). revision() will fast path if there is a cache
1930 # hit. So, we tell _addrevision() to always cache in this case.
1939 # hit. So, we tell _addrevision() to always cache in this case.
1931 # We're only using addgroup() in the context of changegroup
1940 # We're only using addgroup() in the context of changegroup
1932 # generation so the revision data can always be handled as raw
1941 # generation so the revision data can always be handled as raw
1933 # by the flagprocessor.
1942 # by the flagprocessor.
1934 chain = self._addrevision(node, None, transaction, link,
1943 chain = self._addrevision(node, None, transaction, link,
1935 p1, p2, flags, (baserev, delta),
1944 p1, p2, flags, (baserev, delta),
1936 ifh, dfh,
1945 ifh, dfh,
1937 alwayscache=bool(addrevisioncb))
1946 alwayscache=bool(addrevisioncb))
1938
1947
1939 if addrevisioncb:
1948 if addrevisioncb:
1940 addrevisioncb(self, chain)
1949 addrevisioncb(self, chain)
1941
1950
1942 if not dfh and not self._inline:
1951 if not dfh and not self._inline:
1943 # addrevision switched from inline to conventional
1952 # addrevision switched from inline to conventional
1944 # reopen the index
1953 # reopen the index
1945 ifh.close()
1954 ifh.close()
1946 dfh = self.opener(self.datafile, "a+")
1955 dfh = self.opener(self.datafile, "a+")
1947 ifh = self.opener(self.indexfile, "a+",
1956 ifh = self.opener(self.indexfile, "a+",
1948 checkambig=self._checkambig)
1957 checkambig=self._checkambig)
1949 finally:
1958 finally:
1950 if dfh:
1959 if dfh:
1951 dfh.close()
1960 dfh.close()
1952 ifh.close()
1961 ifh.close()
1953
1962
1954 return nodes
1963 return nodes
1955
1964
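Within a delta group only the first chunk needs a base that is already present in the log; each later chunk is normally a delta against whatever node was just added, which is why the loop threads ``chain`` back into ``cg.deltachunk(chain)``. A toy consumer of such a stream, with the changegroup object replaced by a plain list of ``(node, deltabase, delta)`` tuples and a trivial delta format:

    def addgroup_sketch(chunks, known, apply_delta):
        # 'known' maps node -> raw text already in the log;
        # 'apply_delta(basetext, delta)' plays the role of mdiff.patch().
        added = []
        for node, deltabase, delta in chunks:
            if deltabase not in known:
                raise LookupError('unknown delta base %r' % deltabase)
            known[node] = apply_delta(known[deltabase], delta)
            added.append(node)
        return added

    # Trivial delta format for the sketch: the "delta" is simply the new text.
    print(addgroup_sketch([('n1', 'n0', b'v1'), ('n2', 'n1', b'v2')],
                          {'n0': b'v0'}, lambda base, d: d))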
1956 def iscensored(self, rev):
1965 def iscensored(self, rev):
1957 """Check if a file revision is censored."""
1966 """Check if a file revision is censored."""
1958 return False
1967 return False
1959
1968
1960 def _peek_iscensored(self, baserev, delta, flush):
1969 def _peek_iscensored(self, baserev, delta, flush):
1961 """Quickly check if a delta produces a censored revision."""
1970 """Quickly check if a delta produces a censored revision."""
1962 return False
1971 return False
1963
1972
1964 def getstrippoint(self, minlink):
1973 def getstrippoint(self, minlink):
1965 """find the minimum rev that must be stripped to strip the linkrev
1974 """find the minimum rev that must be stripped to strip the linkrev
1966
1975
1967 Returns a tuple containing the minimum rev and a set of all revs that
1976 Returns a tuple containing the minimum rev and a set of all revs that
1968 have linkrevs that will be broken by this strip.
1977 have linkrevs that will be broken by this strip.
1969 """
1978 """
1970 brokenrevs = set()
1979 brokenrevs = set()
1971 strippoint = len(self)
1980 strippoint = len(self)
1972
1981
1973 heads = {}
1982 heads = {}
1974 futurelargelinkrevs = set()
1983 futurelargelinkrevs = set()
1975 for head in self.headrevs():
1984 for head in self.headrevs():
1976 headlinkrev = self.linkrev(head)
1985 headlinkrev = self.linkrev(head)
1977 heads[head] = headlinkrev
1986 heads[head] = headlinkrev
1978 if headlinkrev >= minlink:
1987 if headlinkrev >= minlink:
1979 futurelargelinkrevs.add(headlinkrev)
1988 futurelargelinkrevs.add(headlinkrev)
1980
1989
1981 # This algorithm involves walking down the rev graph, starting at the
1990 # This algorithm involves walking down the rev graph, starting at the
1982 # heads. Since the revs are topologically sorted according to linkrev,
1991 # heads. Since the revs are topologically sorted according to linkrev,
1983 # once all head linkrevs are below the minlink, we know there are
1992 # once all head linkrevs are below the minlink, we know there are
1984 # no more revs that could have a linkrev greater than minlink.
1993 # no more revs that could have a linkrev greater than minlink.
1985 # So we can stop walking.
1994 # So we can stop walking.
1986 while futurelargelinkrevs:
1995 while futurelargelinkrevs:
1987 strippoint -= 1
1996 strippoint -= 1
1988 linkrev = heads.pop(strippoint)
1997 linkrev = heads.pop(strippoint)
1989
1998
1990 if linkrev < minlink:
1999 if linkrev < minlink:
1991 brokenrevs.add(strippoint)
2000 brokenrevs.add(strippoint)
1992 else:
2001 else:
1993 futurelargelinkrevs.remove(linkrev)
2002 futurelargelinkrevs.remove(linkrev)
1994
2003
1995 for p in self.parentrevs(strippoint):
2004 for p in self.parentrevs(strippoint):
1996 if p != nullrev:
2005 if p != nullrev:
1997 plinkrev = self.linkrev(p)
2006 plinkrev = self.linkrev(p)
1998 heads[p] = plinkrev
2007 heads[p] = plinkrev
1999 if plinkrev >= minlink:
2008 if plinkrev >= minlink:
2000 futurelargelinkrevs.add(plinkrev)
2009 futurelargelinkrevs.add(plinkrev)
2001
2010
2002 return strippoint, brokenrevs
2011 return strippoint, brokenrevs
2003
2012
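The walk works because revisions are appended in topological order: once every linkrev still reachable from the remaining heads is below ``minlink``, no lower revision can need stripping. A self-contained sketch over a toy linear history, with ``parentrevs`` and ``linkrev`` replaced by plain lists:

    def getstrippoint_sketch(parentrevs, linkrevs, headrevs, minlink, nullrev=-1):
        brokenrevs = set()
        strippoint = len(linkrevs)
        heads = {}
        futurelarge = set()
        for head in headrevs:
            heads[head] = linkrevs[head]
            if linkrevs[head] >= minlink:
                futurelarge.add(linkrevs[head])
        while futurelarge:
            strippoint -= 1
            linkrev = heads.pop(strippoint)
            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelarge.remove(linkrev)
            for p in parentrevs[strippoint]:
                if p != nullrev:
                    heads[p] = linkrevs[p]
                    if linkrevs[p] >= minlink:
                        futurelarge.add(linkrevs[p])
        return strippoint, brokenrevs

    # Linear history 0-1-2-3 whose linkrevs equal the rev numbers: stripping
    # from linkrev 2 truncates at rev 2 and breaks no other linkrevs.
    print(getstrippoint_sketch([[-1], [0], [1], [2]], [0, 1, 2, 3], [3], 2))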
2004 def strip(self, minlink, transaction):
2013 def strip(self, minlink, transaction):
2005 """truncate the revlog on the first revision with a linkrev >= minlink
2014 """truncate the revlog on the first revision with a linkrev >= minlink
2006
2015
2007 This function is called when we're stripping revision minlink and
2016 This function is called when we're stripping revision minlink and
2008 its descendants from the repository.
2017 its descendants from the repository.
2009
2018
2010 We have to remove all revisions with linkrev >= minlink, because
2019 We have to remove all revisions with linkrev >= minlink, because
2011 the equivalent changelog revisions will be renumbered after the
2020 the equivalent changelog revisions will be renumbered after the
2012 strip.
2021 strip.
2013
2022
2014 So we truncate the revlog on the first of these revisions, and
2023 So we truncate the revlog on the first of these revisions, and
2015 trust that the caller has saved the revisions that shouldn't be
2024 trust that the caller has saved the revisions that shouldn't be
2016 removed and that it'll re-add them after this truncation.
2025 removed and that it'll re-add them after this truncation.
2017 """
2026 """
2018 if len(self) == 0:
2027 if len(self) == 0:
2019 return
2028 return
2020
2029
2021 rev, _ = self.getstrippoint(minlink)
2030 rev, _ = self.getstrippoint(minlink)
2022 if rev == len(self):
2031 if rev == len(self):
2023 return
2032 return
2024
2033
2025 # first truncate the files on disk
2034 # first truncate the files on disk
2026 end = self.start(rev)
2035 end = self.start(rev)
2027 if not self._inline:
2036 if not self._inline:
2028 transaction.add(self.datafile, end)
2037 transaction.add(self.datafile, end)
2029 end = rev * self._io.size
2038 end = rev * self._io.size
2030 else:
2039 else:
2031 end += rev * self._io.size
2040 end += rev * self._io.size
2032
2041
2033 transaction.add(self.indexfile, end)
2042 transaction.add(self.indexfile, end)
2034
2043
2035 # then reset internal state in memory to forget those revisions
2044 # then reset internal state in memory to forget those revisions
2036 self._cache = None
2045 self._cache = None
2037 self._chaininfocache = {}
2046 self._chaininfocache = {}
2038 self._chunkclear()
2047 self._chunkclear()
2039 for x in xrange(rev, len(self)):
2048 for x in xrange(rev, len(self)):
2040 del self.nodemap[self.node(x)]
2049 del self.nodemap[self.node(x)]
2041
2050
2042 del self.index[rev:-1]
2051 del self.index[rev:-1]
2043
2052
2044 def checksize(self):
2053 def checksize(self):
2045 expected = 0
2054 expected = 0
2046 if len(self):
2055 if len(self):
2047 expected = max(0, self.end(len(self) - 1))
2056 expected = max(0, self.end(len(self) - 1))
2048
2057
2049 try:
2058 try:
2050 f = self.opener(self.datafile)
2059 f = self.opener(self.datafile)
2051 f.seek(0, 2)
2060 f.seek(0, 2)
2052 actual = f.tell()
2061 actual = f.tell()
2053 f.close()
2062 f.close()
2054 dd = actual - expected
2063 dd = actual - expected
2055 except IOError as inst:
2064 except IOError as inst:
2056 if inst.errno != errno.ENOENT:
2065 if inst.errno != errno.ENOENT:
2057 raise
2066 raise
2058 dd = 0
2067 dd = 0
2059
2068
2060 try:
2069 try:
2061 f = self.opener(self.indexfile)
2070 f = self.opener(self.indexfile)
2062 f.seek(0, 2)
2071 f.seek(0, 2)
2063 actual = f.tell()
2072 actual = f.tell()
2064 f.close()
2073 f.close()
2065 s = self._io.size
2074 s = self._io.size
2066 i = max(0, actual // s)
2075 i = max(0, actual // s)
2067 di = actual - (i * s)
2076 di = actual - (i * s)
2068 if self._inline:
2077 if self._inline:
2069 databytes = 0
2078 databytes = 0
2070 for r in self:
2079 for r in self:
2071 databytes += max(0, self.length(r))
2080 databytes += max(0, self.length(r))
2072 dd = 0
2081 dd = 0
2073 di = actual - len(self) * s - databytes
2082 di = actual - len(self) * s - databytes
2074 except IOError as inst:
2083 except IOError as inst:
2075 if inst.errno != errno.ENOENT:
2084 if inst.errno != errno.ENOENT:
2076 raise
2085 raise
2077 di = 0
2086 di = 0
2078
2087
2079 return (dd, di)
2088 return (dd, di)
2080
2089
2081 def files(self):
2090 def files(self):
2082 res = [self.indexfile]
2091 res = [self.indexfile]
2083 if not self._inline:
2092 if not self._inline:
2084 res.append(self.datafile)
2093 res.append(self.datafile)
2085 return res
2094 return res
2086
2095
2087 DELTAREUSEALWAYS = 'always'
2096 DELTAREUSEALWAYS = 'always'
2088 DELTAREUSESAMEREVS = 'samerevs'
2097 DELTAREUSESAMEREVS = 'samerevs'
2089 DELTAREUSENEVER = 'never'
2098 DELTAREUSENEVER = 'never'
2090
2099
2091 DELTAREUSEALL = {'always', 'samerevs', 'never'}
2100 DELTAREUSEALL = {'always', 'samerevs', 'never'}
2092
2101
2093 def clone(self, tr, destrevlog, addrevisioncb=None,
2102 def clone(self, tr, destrevlog, addrevisioncb=None,
2094 deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None):
2103 deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None):
2095 """Copy this revlog to another, possibly with format changes.
2104 """Copy this revlog to another, possibly with format changes.
2096
2105
2097 The destination revlog will contain the same revisions and nodes.
2106 The destination revlog will contain the same revisions and nodes.
2098 However, it may not be bit-for-bit identical due to e.g. delta encoding
2107 However, it may not be bit-for-bit identical due to e.g. delta encoding
2099 differences.
2108 differences.
2100
2109
2101 The ``deltareuse`` argument controls how deltas from the existing revlog
2110 The ``deltareuse`` argument controls how deltas from the existing revlog
2102 are preserved in the destination revlog. The argument can have the
2111 are preserved in the destination revlog. The argument can have the
2103 following values:
2112 following values:
2104
2113
2105 DELTAREUSEALWAYS
2114 DELTAREUSEALWAYS
2106 Deltas will always be reused (if possible), even if the destination
2115 Deltas will always be reused (if possible), even if the destination
2107 revlog would not select the same revisions for the delta. This is the
2116 revlog would not select the same revisions for the delta. This is the
2108 fastest mode of operation.
2117 fastest mode of operation.
2109 DELTAREUSESAMEREVS
2118 DELTAREUSESAMEREVS
2110 Deltas will be reused if the destination revlog would pick the same
2119 Deltas will be reused if the destination revlog would pick the same
2111 revisions for the delta. This mode strikes a balance between speed
2120 revisions for the delta. This mode strikes a balance between speed
2112 and optimization.
2121 and optimization.
2113 DELTAREUSENEVER
2122 DELTAREUSENEVER
2114 Deltas will never be reused. This is the slowest mode of execution.
2123 Deltas will never be reused. This is the slowest mode of execution.
2115 This mode can be used to recompute deltas (e.g. if the diff/delta
2124 This mode can be used to recompute deltas (e.g. if the diff/delta
2116 algorithm changes).
2125 algorithm changes).
2117
2126
2118 Delta computation can be slow, so the choice of delta reuse policy can
2127 Delta computation can be slow, so the choice of delta reuse policy can
2119 significantly affect run time.
2128 significantly affect run time.
2120
2129
2121 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2130 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2122 two extremes. Deltas will be reused if they are appropriate. But if the
2131 two extremes. Deltas will be reused if they are appropriate. But if the
2123 delta could choose a better revision, it will do so. This means if you
2132 delta could choose a better revision, it will do so. This means if you
2124 are converting a non-generaldelta revlog to a generaldelta revlog,
2133 are converting a non-generaldelta revlog to a generaldelta revlog,
2125 deltas will be recomputed if the delta's parent isn't a parent of the
2134 deltas will be recomputed if the delta's parent isn't a parent of the
2126 revision.
2135 revision.
2127
2136
2128 In addition to the delta policy, the ``aggressivemergedeltas`` argument
2137 In addition to the delta policy, the ``aggressivemergedeltas`` argument
2129 controls whether to compute deltas against both parents for merges.
2138 controls whether to compute deltas against both parents for merges.
2130 If it is not specified, the destination revlog's current setting is kept.
2139 If it is not specified, the destination revlog's current setting is kept.
2131 """
2140 """
2132 if deltareuse not in self.DELTAREUSEALL:
2141 if deltareuse not in self.DELTAREUSEALL:
2133 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2142 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2134
2143
2135 if len(destrevlog):
2144 if len(destrevlog):
2136 raise ValueError(_('destination revlog is not empty'))
2145 raise ValueError(_('destination revlog is not empty'))
2137
2146
2138 if getattr(self, 'filteredrevs', None):
2147 if getattr(self, 'filteredrevs', None):
2139 raise ValueError(_('source revlog has filtered revisions'))
2148 raise ValueError(_('source revlog has filtered revisions'))
2140 if getattr(destrevlog, 'filteredrevs', None):
2149 if getattr(destrevlog, 'filteredrevs', None):
2141 raise ValueError(_('destination revlog has filtered revisions'))
2150 raise ValueError(_('destination revlog has filtered revisions'))
2142
2151
2143 # lazydeltabase controls whether to reuse a cached delta, if possible.
2152 # lazydeltabase controls whether to reuse a cached delta, if possible.
2144 oldlazydeltabase = destrevlog._lazydeltabase
2153 oldlazydeltabase = destrevlog._lazydeltabase
2145 oldamd = destrevlog._aggressivemergedeltas
2154 oldamd = destrevlog._aggressivemergedeltas
2146
2155
2147 try:
2156 try:
2148 if deltareuse == self.DELTAREUSEALWAYS:
2157 if deltareuse == self.DELTAREUSEALWAYS:
2149 destrevlog._lazydeltabase = True
2158 destrevlog._lazydeltabase = True
2150 elif deltareuse == self.DELTAREUSESAMEREVS:
2159 elif deltareuse == self.DELTAREUSESAMEREVS:
2151 destrevlog._lazydeltabase = False
2160 destrevlog._lazydeltabase = False
2152
2161
2153 destrevlog._aggressivemergedeltas = aggressivemergedeltas or oldamd
2162 destrevlog._aggressivemergedeltas = aggressivemergedeltas or oldamd
2154
2163
2155 populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
2164 populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
2156 self.DELTAREUSESAMEREVS)
2165 self.DELTAREUSESAMEREVS)
2157
2166
2158 index = self.index
2167 index = self.index
2159 for rev in self:
2168 for rev in self:
2160 entry = index[rev]
2169 entry = index[rev]
2161
2170
2162 # Some classes override linkrev to take filtered revs into
2171 # Some classes override linkrev to take filtered revs into
2163 # account. Use raw entry from index.
2172 # account. Use raw entry from index.
2164 flags = entry[0] & 0xffff
2173 flags = entry[0] & 0xffff
2165 linkrev = entry[4]
2174 linkrev = entry[4]
2166 p1 = index[entry[5]][7]
2175 p1 = index[entry[5]][7]
2167 p2 = index[entry[6]][7]
2176 p2 = index[entry[6]][7]
2168 node = entry[7]
2177 node = entry[7]
2169
2178
2170 # (Possibly) reuse the delta from the revlog if allowed and
2179 # (Possibly) reuse the delta from the revlog if allowed and
2171 # the revlog chunk is a delta.
2180 # the revlog chunk is a delta.
2172 cachedelta = None
2181 cachedelta = None
2173 rawtext = None
2182 rawtext = None
2174 if populatecachedelta:
2183 if populatecachedelta:
2175 dp = self.deltaparent(rev)
2184 dp = self.deltaparent(rev)
2176 if dp != nullrev:
2185 if dp != nullrev:
2177 cachedelta = (dp, str(self._chunk(rev)))
2186 cachedelta = (dp, str(self._chunk(rev)))
2178
2187
2179 if not cachedelta:
2188 if not cachedelta:
2180 rawtext = self.revision(rev, raw=True)
2189 rawtext = self.revision(rev, raw=True)
2181
2190
2182 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2191 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2183 checkambig=False)
2192 checkambig=False)
2184 dfh = None
2193 dfh = None
2185 if not destrevlog._inline:
2194 if not destrevlog._inline:
2186 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2195 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2187 try:
2196 try:
2188 destrevlog._addrevision(node, rawtext, tr, linkrev, p1, p2,
2197 destrevlog._addrevision(node, rawtext, tr, linkrev, p1, p2,
2189 flags, cachedelta, ifh, dfh)
2198 flags, cachedelta, ifh, dfh)
2190 finally:
2199 finally:
2191 if dfh:
2200 if dfh:
2192 dfh.close()
2201 dfh.close()
2193 ifh.close()
2202 ifh.close()
2194
2203
2195 if addrevisioncb:
2204 if addrevisioncb:
2196 addrevisioncb(self, rev, node)
2205 addrevisioncb(self, rev, node)
2197 finally:
2206 finally:
2198 destrevlog._lazydeltabase = oldlazydeltabase
2207 destrevlog._lazydeltabase = oldlazydeltabase
2199 destrevlog._aggressivemergedeltas = oldamd
2208 destrevlog._aggressivemergedeltas = oldamd
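The three ``deltareuse`` policies trade conversion speed against delta quality, so the typical reason to pick ``DELTAREUSENEVER`` is a wholesale recompute, for instance when converting a repository to a different delta configuration. A hedged usage sketch; ``srcrevlog``, ``dstrevlog`` and ``tr`` are assumed to be an existing revlog, an empty destination revlog of the desired format, and an open transaction:

    def reclone_with_fresh_deltas(srcrevlog, dstrevlog, tr):
        # DELTAREUSENEVER forces every delta to be recomputed in the
        # destination (slowest mode, but it applies the new delta settings).
        def report(rl, rev, node):
            print('copied rev %d' % rev)

        srcrevlog.clone(tr, dstrevlog,
                        addrevisioncb=report,
                        deltareuse=srcrevlog.DELTAREUSENEVER,
                        aggressivemergedeltas=True)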
@@ -1,163 +1,351 b''
1 Check whether size of generaldelta revlog is not bigger than its
1 Check whether size of generaldelta revlog is not bigger than its
2 regular equivalent. Test would fail if generaldelta was naive
2 regular equivalent. Test would fail if generaldelta was naive
3 implementation of parentdelta: third manifest revision would be fully
3 implementation of parentdelta: third manifest revision would be fully
4 inserted due to big distance from its parent revision (zero).
4 inserted due to big distance from its parent revision (zero).
5
5
6 $ hg init repo --config format.generaldelta=no --config format.usegeneraldelta=no
6 $ hg init repo --config format.generaldelta=no --config format.usegeneraldelta=no
7 $ cd repo
7 $ cd repo
8 $ echo foo > foo
8 $ echo foo > foo
9 $ echo bar > bar
9 $ echo bar > bar
10 $ echo baz > baz
10 $ echo baz > baz
11 $ hg commit -q -Am boo
11 $ hg commit -q -Am boo
12 $ hg clone --pull . ../gdrepo -q --config format.generaldelta=yes
12 $ hg clone --pull . ../gdrepo -q --config format.generaldelta=yes
13 $ for r in 1 2 3; do
13 $ for r in 1 2 3; do
14 > echo $r > foo
14 > echo $r > foo
15 > hg commit -q -m $r
15 > hg commit -q -m $r
16 > hg up -q -r 0
16 > hg up -q -r 0
17 > hg pull . -q -r $r -R ../gdrepo
17 > hg pull . -q -r $r -R ../gdrepo
18 > done
18 > done
19
19
20 $ cd ..
20 $ cd ..
21 >>> from __future__ import print_function
21 >>> from __future__ import print_function
22 >>> import os
22 >>> import os
23 >>> regsize = os.stat("repo/.hg/store/00manifest.i").st_size
23 >>> regsize = os.stat("repo/.hg/store/00manifest.i").st_size
24 >>> gdsize = os.stat("gdrepo/.hg/store/00manifest.i").st_size
24 >>> gdsize = os.stat("gdrepo/.hg/store/00manifest.i").st_size
25 >>> if regsize < gdsize:
25 >>> if regsize < gdsize:
26 ... print('generaldelta increased size of manifest')
26 ... print('generaldelta increased size of manifest')
27
27
28 Verify rev reordering doesn't create invalid bundles (issue4462)
28 Verify rev reordering doesn't create invalid bundles (issue4462)
29 This requires a commit tree that when pulled will reorder manifest revs such
29 This requires a commit tree that when pulled will reorder manifest revs such
30 that the second manifest to create a file rev will be ordered before the first
30 that the second manifest to create a file rev will be ordered before the first
31 manifest to create that file rev. We also need to do a partial pull to ensure
31 manifest to create that file rev. We also need to do a partial pull to ensure
32 reordering happens. At the end we verify the linkrev points at the earliest
32 reordering happens. At the end we verify the linkrev points at the earliest
33 commit.
33 commit.
34
34
35 $ hg init server --config format.generaldelta=True
35 $ hg init server --config format.generaldelta=True
36 $ cd server
36 $ cd server
37 $ touch a
37 $ touch a
38 $ hg commit -Aqm a
38 $ hg commit -Aqm a
39 $ echo x > x
39 $ echo x > x
40 $ echo y > y
40 $ echo y > y
41 $ hg commit -Aqm xy
41 $ hg commit -Aqm xy
42 $ hg up -q '.^'
42 $ hg up -q '.^'
43 $ echo x > x
43 $ echo x > x
44 $ echo z > z
44 $ echo z > z
45 $ hg commit -Aqm xz
45 $ hg commit -Aqm xz
46 $ hg up -q 1
46 $ hg up -q 1
47 $ echo b > b
47 $ echo b > b
48 $ hg commit -Aqm b
48 $ hg commit -Aqm b
49 $ hg merge -q 2
49 $ hg merge -q 2
50 $ hg commit -Aqm merge
50 $ hg commit -Aqm merge
51 $ echo c > c
51 $ echo c > c
52 $ hg commit -Aqm c
52 $ hg commit -Aqm c
53 $ hg log -G -T '{rev} {shortest(node)} {desc}'
53 $ hg log -G -T '{rev} {shortest(node)} {desc}'
54 @ 5 ebb8 c
54 @ 5 ebb8 c
55 |
55 |
56 o 4 baf7 merge
56 o 4 baf7 merge
57 |\
57 |\
58 | o 3 a129 b
58 | o 3 a129 b
59 | |
59 | |
60 o | 2 958c xz
60 o | 2 958c xz
61 | |
61 | |
62 | o 1 f00c xy
62 | o 1 f00c xy
63 |/
63 |/
64 o 0 3903 a
64 o 0 3903 a
65
65
66 $ cd ..
66 $ cd ..
67 $ hg init client --config format.generaldelta=false --config format.usegeneraldelta=false
67 $ hg init client --config format.generaldelta=false --config format.usegeneraldelta=false
68 $ cd client
68 $ cd client
69 $ hg pull -q ../server -r 4
69 $ hg pull -q ../server -r 4
70 $ hg debugindex x
70 $ hg debugindex x
71 rev offset length base linkrev nodeid p1 p2
71 rev offset length base linkrev nodeid p1 p2
72 0 0 3 0 1 1406e7411862 000000000000 000000000000
72 0 0 3 0 1 1406e7411862 000000000000 000000000000
73
73
74 $ cd ..
74 $ cd ..
75
75
76 Test "usegeneraldelta" config
76 Test "usegeneraldelta" config
77 (repo is general delta, but incoming bundles are not re-deltified)
77 (repo is general delta, but incoming bundles are not re-deltified)
78
78
79 deltas coming from the server, based on the delta base the server chose, are not recompressed.
79 deltas coming from the server, based on the delta base the server chose, are not recompressed.
80 (also include the aggressive version for comparison)
80 (also include the aggressive version for comparison)
81
81
82 $ hg clone repo --pull --config format.usegeneraldelta=1 usegd
82 $ hg clone repo --pull --config format.usegeneraldelta=1 usegd
83 requesting all changes
83 requesting all changes
84 adding changesets
84 adding changesets
85 adding manifests
85 adding manifests
86 adding file changes
86 adding file changes
87 added 4 changesets with 6 changes to 3 files (+2 heads)
87 added 4 changesets with 6 changes to 3 files (+2 heads)
88 updating to branch default
88 updating to branch default
89 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
90 $ hg clone repo --pull --config format.generaldelta=1 full
90 $ hg clone repo --pull --config format.generaldelta=1 full
91 requesting all changes
91 requesting all changes
92 adding changesets
92 adding changesets
93 adding manifests
93 adding manifests
94 adding file changes
94 adding file changes
95 added 4 changesets with 6 changes to 3 files (+2 heads)
95 added 4 changesets with 6 changes to 3 files (+2 heads)
96 updating to branch default
96 updating to branch default
97 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
97 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
98 $ hg -R repo debugindex -m
98 $ hg -R repo debugindex -m
99 rev offset length base linkrev nodeid p1 p2
99 rev offset length base linkrev nodeid p1 p2
100 0 0 104 0 0 cef96823c800 000000000000 000000000000
100 0 0 104 0 0 cef96823c800 000000000000 000000000000
101 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
101 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
102 2 161 57 0 2 134fdc6fd680 cef96823c800 000000000000
102 2 161 57 0 2 134fdc6fd680 cef96823c800 000000000000
103 3 218 104 3 3 723508934dad cef96823c800 000000000000
103 3 218 104 3 3 723508934dad cef96823c800 000000000000
104 $ hg -R usegd debugindex -m
104 $ hg -R usegd debugindex -m
105 rev offset length delta linkrev nodeid p1 p2
105 rev offset length delta linkrev nodeid p1 p2
106 0 0 104 -1 0 cef96823c800 000000000000 000000000000
106 0 0 104 -1 0 cef96823c800 000000000000 000000000000
107 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
107 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
108 2 161 57 1 2 134fdc6fd680 cef96823c800 000000000000
108 2 161 57 1 2 134fdc6fd680 cef96823c800 000000000000
109 3 218 57 0 3 723508934dad cef96823c800 000000000000
109 3 218 57 0 3 723508934dad cef96823c800 000000000000
110 $ hg -R full debugindex -m
110 $ hg -R full debugindex -m
111 rev offset length delta linkrev nodeid p1 p2
111 rev offset length delta linkrev nodeid p1 p2
112 0 0 104 -1 0 cef96823c800 000000000000 000000000000
112 0 0 104 -1 0 cef96823c800 000000000000 000000000000
113 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
113 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
114 2 161 57 0 2 134fdc6fd680 cef96823c800 000000000000
114 2 161 57 0 2 134fdc6fd680 cef96823c800 000000000000
115 3 218 57 0 3 723508934dad cef96823c800 000000000000
115 3 218 57 0 3 723508934dad cef96823c800 000000000000
116
116
117 Test format.aggressivemergedeltas
117 Test format.aggressivemergedeltas
118
118
119 $ hg init --config format.generaldelta=1 aggressive
119 $ hg init --config format.generaldelta=1 aggressive
120 $ cd aggressive
120 $ cd aggressive
121 $ cat << EOF >> .hg/hgrc
121 $ cat << EOF >> .hg/hgrc
122 > [format]
122 > [format]
123 > generaldelta = 1
123 > generaldelta = 1
124 > EOF
124 > EOF
125 $ touch a b c d e
125 $ touch a b c d e
126 $ hg commit -Aqm side1
126 $ hg commit -Aqm side1
127 $ hg up -q null
127 $ hg up -q null
128 $ touch x y
128 $ touch x y
129 $ hg commit -Aqm side2
129 $ hg commit -Aqm side2
130
130
131 - Verify non-aggressive merge uses p1 (commit 1) as delta parent
131 - Verify non-aggressive merge uses p1 (commit 1) as delta parent
132 $ hg merge -q 0
132 $ hg merge -q 0
133 $ hg commit -q -m merge
133 $ hg commit -q -m merge
134 $ hg debugindex -m
134 $ hg debugindex -m
135 rev offset length delta linkrev nodeid p1 p2
135 rev offset length delta linkrev nodeid p1 p2
136 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
136 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
137 1 59 61 0 1 315c023f341d 000000000000 000000000000
137 1 59 61 0 1 315c023f341d 000000000000 000000000000
138 2 120 65 1 2 2ab389a983eb 315c023f341d 8dde941edb6e
138 2 120 65 1 2 2ab389a983eb 315c023f341d 8dde941edb6e
139
139
140 $ hg strip -q -r . --config extensions.strip=
140 $ hg strip -q -r . --config extensions.strip=
141
141
142 - Verify aggressive merge uses p2 (commit 0) as delta parent
142 - Verify aggressive merge uses p2 (commit 0) as delta parent
143 $ hg up -q -C 1
143 $ hg up -q -C 1
144 $ hg merge -q 0
144 $ hg merge -q 0
145 $ hg commit -q -m merge --config format.aggressivemergedeltas=True
145 $ hg commit -q -m merge --config format.aggressivemergedeltas=True
146 $ hg debugindex -m
146 $ hg debugindex -m
147 rev offset length delta linkrev nodeid p1 p2
147 rev offset length delta linkrev nodeid p1 p2
148 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
148 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
149 1 59 61 0 1 315c023f341d 000000000000 000000000000
149 1 59 61 0 1 315c023f341d 000000000000 000000000000
150 2 120 62 0 2 2ab389a983eb 315c023f341d 8dde941edb6e
150 2 120 62 0 2 2ab389a983eb 315c023f341d 8dde941edb6e
151
151
152 Test that strip backup bundles use bundle2
152 Test that strip backup bundles use bundle2
153 $ hg --config extensions.strip= strip .
153 $ hg --config extensions.strip= strip .
154 0 files updated, 0 files merged, 5 files removed, 0 files unresolved
154 0 files updated, 0 files merged, 5 files removed, 0 files unresolved
155 saved backup bundle to $TESTTMP/aggressive/.hg/strip-backup/1c5d4dc9a8b8-6c68e60c-backup.hg (glob)
155 saved backup bundle to $TESTTMP/aggressive/.hg/strip-backup/1c5d4dc9a8b8-6c68e60c-backup.hg (glob)
156 $ hg debugbundle .hg/strip-backup/*
156 $ hg debugbundle .hg/strip-backup/*
157 Stream params: sortdict([('Compression', 'BZ')])
157 Stream params: sortdict([('Compression', 'BZ')])
158 changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
158 changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
159 1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9
159 1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9
160 phase-heads -- 'sortdict()'
160 phase-heads -- 'sortdict()'
161 1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9 draft
161 1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9 draft
162
162
163 $ cd ..
163 $ cd ..
164
165 Test maxdeltachainspan
166
167 $ hg init source-repo
168 $ cd source-repo
169 $ hg debugbuilddag --new-file '.+5:brancha$.+11:branchb$.+30:branchc<brancha+2<branchb+2'
170 $ cd ..
171 $ hg -R source-repo debugindex -m
172 rev offset length delta linkrev nodeid p1 p2
173 0 0 46 -1 0 19deeef41503 000000000000 000000000000
174 1 46 57 0 1 fffc37b38c40 19deeef41503 000000000000
175 2 103 57 1 2 5822d75c83d9 fffc37b38c40 000000000000
176 3 160 57 2 3 19cf2273e601 5822d75c83d9 000000000000
177 4 217 57 3 4 d45ead487afe 19cf2273e601 000000000000
178 5 274 57 4 5 96e0c2ce55ed d45ead487afe 000000000000
179 6 331 46 -1 6 0c2ea5222c74 000000000000 000000000000
180 7 377 57 6 7 4ca08a89134d 0c2ea5222c74 000000000000
181 8 434 57 7 8 c973dbfd30ac 4ca08a89134d 000000000000
182 9 491 57 8 9 d81d878ff2cd c973dbfd30ac 000000000000
183 10 548 58 9 10 dbee7f0dd760 d81d878ff2cd 000000000000
184 11 606 58 10 11 474be9f1fd4e dbee7f0dd760 000000000000
185 12 664 58 11 12 594a27502c85 474be9f1fd4e 000000000000
186 13 722 58 12 13 a7d25307d6a9 594a27502c85 000000000000
187 14 780 58 13 14 3eb53082272e a7d25307d6a9 000000000000
188 15 838 58 14 15 d1e94c85caf6 3eb53082272e 000000000000
189 16 896 58 15 16 8933d9629788 d1e94c85caf6 000000000000
190 17 954 58 16 17 a33416e52d91 8933d9629788 000000000000
191 18 1012 47 -1 18 4ccbf31021ed 000000000000 000000000000
192 19 1059 58 18 19 dcad7a25656c 4ccbf31021ed 000000000000
193 20 1117 58 19 20 617c4f8be75f dcad7a25656c 000000000000
194 21 1175 58 20 21 975b9c1d75bb 617c4f8be75f 000000000000
195 22 1233 58 21 22 74f09cd33b70 975b9c1d75bb 000000000000
196 23 1291 58 22 23 54e79bfa7ef1 74f09cd33b70 000000000000
197 24 1349 58 23 24 c556e7ff90af 54e79bfa7ef1 000000000000
198 25 1407 58 24 25 42daedfe9c6b c556e7ff90af 000000000000
199 26 1465 58 25 26 f302566947c7 42daedfe9c6b 000000000000
200 27 1523 58 26 27 2346959851cb f302566947c7 000000000000
201 28 1581 58 27 28 ca8d867106b4 2346959851cb 000000000000
202 29 1639 58 28 29 fd9152decab2 ca8d867106b4 000000000000
203 30 1697 58 29 30 3fe34080a79b fd9152decab2 000000000000
204 31 1755 58 30 31 bce61a95078e 3fe34080a79b 000000000000
205 32 1813 58 31 32 1dd9ba54ba15 bce61a95078e 000000000000
206 33 1871 58 32 33 3cd9b90a9972 1dd9ba54ba15 000000000000
207 34 1929 58 33 34 5db8c9754ef5 3cd9b90a9972 000000000000
208 35 1987 58 34 35 ee4a240cc16c 5db8c9754ef5 000000000000
209 36 2045 58 35 36 9e1d38725343 ee4a240cc16c 000000000000
210 37 2103 58 36 37 3463f73086a8 9e1d38725343 000000000000
211 38 2161 58 37 38 88af72fab449 3463f73086a8 000000000000
212 39 2219 58 38 39 472f5ce73785 88af72fab449 000000000000
213 40 2277 58 39 40 c91b8351e5b8 472f5ce73785 000000000000
214 41 2335 58 40 41 9c8289c5c5c0 c91b8351e5b8 000000000000
215 42 2393 58 41 42 a13fd4a09d76 9c8289c5c5c0 000000000000
216 43 2451 58 42 43 2ec2c81cafe0 a13fd4a09d76 000000000000
217 44 2509 58 43 44 f27fdd174392 2ec2c81cafe0 000000000000
218 45 2567 58 44 45 a539ec59fe41 f27fdd174392 000000000000
219 46 2625 58 45 46 5e98b9ecb738 a539ec59fe41 000000000000
220 47 2683 58 46 47 31e6b47899d0 5e98b9ecb738 000000000000
221 48 2741 58 47 48 2cf25d6636bd 31e6b47899d0 000000000000
222 49 2799 197 -1 49 9fff62ea0624 96e0c2ce55ed 000000000000
223 50 2996 58 49 50 467f8e30a066 9fff62ea0624 000000000000
224 51 3054 356 50 51 346db97283df a33416e52d91 000000000000
225 52 3410 58 51 52 4e003fd4d5cd 346db97283df 000000000000
226 $ hg clone --pull source-repo --config experimental.maxdeltachainspan=2800 relax-chain --config format.generaldelta=yes
227 requesting all changes
228 adding changesets
229 adding manifests
230 adding file changes
231 added 53 changesets with 53 changes to 53 files (+2 heads)
232 updating to branch default
233 14 files updated, 0 files merged, 0 files removed, 0 files unresolved
234 $ hg -R relax-chain debugindex -m
235 rev offset length delta linkrev nodeid p1 p2
236 0 0 46 -1 0 19deeef41503 000000000000 000000000000
237 1 46 57 0 1 fffc37b38c40 19deeef41503 000000000000
238 2 103 57 1 2 5822d75c83d9 fffc37b38c40 000000000000
239 3 160 57 2 3 19cf2273e601 5822d75c83d9 000000000000
240 4 217 57 3 4 d45ead487afe 19cf2273e601 000000000000
241 5 274 57 4 5 96e0c2ce55ed d45ead487afe 000000000000
242 6 331 46 -1 6 0c2ea5222c74 000000000000 000000000000
243 7 377 57 6 7 4ca08a89134d 0c2ea5222c74 000000000000
244 8 434 57 7 8 c973dbfd30ac 4ca08a89134d 000000000000
245 9 491 57 8 9 d81d878ff2cd c973dbfd30ac 000000000000
246 10 548 58 9 10 dbee7f0dd760 d81d878ff2cd 000000000000
247 11 606 58 10 11 474be9f1fd4e dbee7f0dd760 000000000000
248 12 664 58 11 12 594a27502c85 474be9f1fd4e 000000000000
249 13 722 58 12 13 a7d25307d6a9 594a27502c85 000000000000
250 14 780 58 13 14 3eb53082272e a7d25307d6a9 000000000000
251 15 838 58 14 15 d1e94c85caf6 3eb53082272e 000000000000
252 16 896 58 15 16 8933d9629788 d1e94c85caf6 000000000000
253 17 954 58 16 17 a33416e52d91 8933d9629788 000000000000
254 18 1012 47 -1 18 4ccbf31021ed 000000000000 000000000000
255 19 1059 58 18 19 dcad7a25656c 4ccbf31021ed 000000000000
256 20 1117 58 19 20 617c4f8be75f dcad7a25656c 000000000000
257 21 1175 58 20 21 975b9c1d75bb 617c4f8be75f 000000000000
258 22 1233 58 21 22 74f09cd33b70 975b9c1d75bb 000000000000
259 23 1291 58 22 23 54e79bfa7ef1 74f09cd33b70 000000000000
260 24 1349 58 23 24 c556e7ff90af 54e79bfa7ef1 000000000000
261 25 1407 58 24 25 42daedfe9c6b c556e7ff90af 000000000000
262 26 1465 58 25 26 f302566947c7 42daedfe9c6b 000000000000
263 27 1523 58 26 27 2346959851cb f302566947c7 000000000000
264 28 1581 58 27 28 ca8d867106b4 2346959851cb 000000000000
265 29 1639 58 28 29 fd9152decab2 ca8d867106b4 000000000000
266 30 1697 58 29 30 3fe34080a79b fd9152decab2 000000000000
267 31 1755 58 30 31 bce61a95078e 3fe34080a79b 000000000000
268 32 1813 58 31 32 1dd9ba54ba15 bce61a95078e 000000000000
269 33 1871 58 32 33 3cd9b90a9972 1dd9ba54ba15 000000000000
270 34 1929 58 33 34 5db8c9754ef5 3cd9b90a9972 000000000000
271 35 1987 58 34 35 ee4a240cc16c 5db8c9754ef5 000000000000
272 36 2045 58 35 36 9e1d38725343 ee4a240cc16c 000000000000
273 37 2103 58 36 37 3463f73086a8 9e1d38725343 000000000000
274 38 2161 58 37 38 88af72fab449 3463f73086a8 000000000000
275 39 2219 58 38 39 472f5ce73785 88af72fab449 000000000000
276 40 2277 58 39 40 c91b8351e5b8 472f5ce73785 000000000000
277 41 2335 58 40 41 9c8289c5c5c0 c91b8351e5b8 000000000000
278 42 2393 58 41 42 a13fd4a09d76 9c8289c5c5c0 000000000000
279 43 2451 58 42 43 2ec2c81cafe0 a13fd4a09d76 000000000000
280 44 2509 58 43 44 f27fdd174392 2ec2c81cafe0 000000000000
281 45 2567 58 44 45 a539ec59fe41 f27fdd174392 000000000000
282 46 2625 58 45 46 5e98b9ecb738 a539ec59fe41 000000000000
283 47 2683 58 46 47 31e6b47899d0 5e98b9ecb738 000000000000
284 48 2741 58 47 48 2cf25d6636bd 31e6b47899d0 000000000000
285 49 2799 197 -1 49 9fff62ea0624 96e0c2ce55ed 000000000000
286 50 2996 58 49 50 467f8e30a066 9fff62ea0624 000000000000
287 51 3054 58 17 51 346db97283df a33416e52d91 000000000000
288 52 3112 369 -1 52 4e003fd4d5cd 346db97283df 000000000000
289 $ hg clone --pull source-repo --config experimental.maxdeltachainspan=0 noconst-chain --config format.generaldelta=yes
290 requesting all changes
291 adding changesets
292 adding manifests
293 adding file changes
294 added 53 changesets with 53 changes to 53 files (+2 heads)
295 updating to branch default
296 14 files updated, 0 files merged, 0 files removed, 0 files unresolved
297 $ hg -R noconst-chain debugindex -m
298 rev offset length delta linkrev nodeid p1 p2
299 0 0 46 -1 0 19deeef41503 000000000000 000000000000
300 1 46 57 0 1 fffc37b38c40 19deeef41503 000000000000
301 2 103 57 1 2 5822d75c83d9 fffc37b38c40 000000000000
302 3 160 57 2 3 19cf2273e601 5822d75c83d9 000000000000
303 4 217 57 3 4 d45ead487afe 19cf2273e601 000000000000
304 5 274 57 4 5 96e0c2ce55ed d45ead487afe 000000000000
305 6 331 46 -1 6 0c2ea5222c74 000000000000 000000000000
306 7 377 57 6 7 4ca08a89134d 0c2ea5222c74 000000000000
307 8 434 57 7 8 c973dbfd30ac 4ca08a89134d 000000000000
308 9 491 57 8 9 d81d878ff2cd c973dbfd30ac 000000000000
309 10 548 58 9 10 dbee7f0dd760 d81d878ff2cd 000000000000
310 11 606 58 10 11 474be9f1fd4e dbee7f0dd760 000000000000
311 12 664 58 11 12 594a27502c85 474be9f1fd4e 000000000000
312 13 722 58 12 13 a7d25307d6a9 594a27502c85 000000000000
313 14 780 58 13 14 3eb53082272e a7d25307d6a9 000000000000
314 15 838 58 14 15 d1e94c85caf6 3eb53082272e 000000000000
315 16 896 58 15 16 8933d9629788 d1e94c85caf6 000000000000
316 17 954 58 16 17 a33416e52d91 8933d9629788 000000000000
317 18 1012 47 -1 18 4ccbf31021ed 000000000000 000000000000
318 19 1059 58 18 19 dcad7a25656c 4ccbf31021ed 000000000000
319 20 1117 58 19 20 617c4f8be75f dcad7a25656c 000000000000
320 21 1175 58 20 21 975b9c1d75bb 617c4f8be75f 000000000000
321 22 1233 58 21 22 74f09cd33b70 975b9c1d75bb 000000000000
322 23 1291 58 22 23 54e79bfa7ef1 74f09cd33b70 000000000000
323 24 1349 58 23 24 c556e7ff90af 54e79bfa7ef1 000000000000
324 25 1407 58 24 25 42daedfe9c6b c556e7ff90af 000000000000
325 26 1465 58 25 26 f302566947c7 42daedfe9c6b 000000000000
326 27 1523 58 26 27 2346959851cb f302566947c7 000000000000
327 28 1581 58 27 28 ca8d867106b4 2346959851cb 000000000000
328 29 1639 58 28 29 fd9152decab2 ca8d867106b4 000000000000
329 30 1697 58 29 30 3fe34080a79b fd9152decab2 000000000000
330 31 1755 58 30 31 bce61a95078e 3fe34080a79b 000000000000
331 32 1813 58 31 32 1dd9ba54ba15 bce61a95078e 000000000000
332 33 1871 58 32 33 3cd9b90a9972 1dd9ba54ba15 000000000000
333 34 1929 58 33 34 5db8c9754ef5 3cd9b90a9972 000000000000
334 35 1987 58 34 35 ee4a240cc16c 5db8c9754ef5 000000000000
335 36 2045 58 35 36 9e1d38725343 ee4a240cc16c 000000000000
336 37 2103 58 36 37 3463f73086a8 9e1d38725343 000000000000
337 38 2161 58 37 38 88af72fab449 3463f73086a8 000000000000
338 39 2219 58 38 39 472f5ce73785 88af72fab449 000000000000
339 40 2277 58 39 40 c91b8351e5b8 472f5ce73785 000000000000
340 41 2335 58 40 41 9c8289c5c5c0 c91b8351e5b8 000000000000
341 42 2393 58 41 42 a13fd4a09d76 9c8289c5c5c0 000000000000
342 43 2451 58 42 43 2ec2c81cafe0 a13fd4a09d76 000000000000
343 44 2509 58 43 44 f27fdd174392 2ec2c81cafe0 000000000000
344 45 2567 58 44 45 a539ec59fe41 f27fdd174392 000000000000
345 46 2625 58 45 46 5e98b9ecb738 a539ec59fe41 000000000000
346 47 2683 58 46 47 31e6b47899d0 5e98b9ecb738 000000000000
347 48 2741 58 47 48 2cf25d6636bd 31e6b47899d0 000000000000
348 49 2799 58 5 49 9fff62ea0624 96e0c2ce55ed 000000000000
349 50 2857 58 49 50 467f8e30a066 9fff62ea0624 000000000000
350 51 2915 58 17 51 346db97283df a33416e52d91 000000000000
351 52 2973 58 51 52 4e003fd4d5cd 346db97283df 000000000000
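The new test block above exercises the experimental.maxdeltachainspan option this change introduces: it caps how far apart, measured roughly in bytes of revlog offset, the revisions of a delta chain may sit before a full snapshot is forced (rev 52 in the relax-chain clone above is stored as a full revision once its chain would span more than 2800 bytes), while a value of 0 (the noconst-chain clone) lifts the cap entirely. As a hedged example, the same knob could be set in an hgrc instead of via --config; the value 2800 simply mirrors the test above and is not a recommended default.

[experimental]
# Cap the on-disk byte span a delta chain may cover; 0 disables the cap.
maxdeltachainspan = 2800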