revlog: add an aggressivemergedelta option...
Durham Goode
r26118:049005de default
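The change below wires a new option into the repository's revlog plumbing: in _applyopenerreqs(), format.aggressivemergedeltas is read with ui.configbool (defaulting to False) and copied into self.svfs.options, the per-revlog options dictionary exposed through the store vfs, next to the existing chunkcachesize, maxchainlen and manifestcachesize entries. To experiment with it, one would presumably enable the option in an hgrc; the option name is taken from the code below (note the commit title spells it without the trailing "s"):

    [format]
    aggressivemergedeltas = True

or, for a single command, pass it as hg --config format.aggressivemergedeltas=True.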
@@ -1,1952 +1,1956 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from node import hex, nullid, wdirrev, short
 from i18n import _
 import urllib
 import peer, changegroup, subrepo, pushkey, obsolete, repoview
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock as lockmod
 import transaction, store, encoding, exchange, bundle2
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect, random
 import branchmap, pathutil
 import namespaces
 propertycache = util.propertycache
 filecache = scmutil.filecache

 class repofilecache(filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """

     def __get__(self, repo, type=None):
         return super(repofilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(repofilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(repofilecache, self).__delete__(repo.unfiltered())

 class storecache(repofilecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)

 class unfilteredpropertycache(propertycache):
     """propertycache that apply to unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)

 class filteredpropertycache(propertycache):
     """propertycache that must take filtering in account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())

 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper

 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                   'unbundle'))
 legacycaps = moderncaps.union(set(['changegroupsubset']))

 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=moderncaps):
         peer.peerrepository.__init__(self)
         self._repo = repo.filtered('served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
         self.requirements = repo.requirements
         self.supportedformats = repo.supportedformats

     def close(self):
         self._repo.close()

     def _capabilities(self):
         return self._caps

     def local(self):
         return self._repo

     def canpush(self):
         return True

     def url(self):
         return self._repo.url()

     def lookup(self, key):
         return self._repo.lookup(key)

     def branchmap(self):
         return self._repo.branchmap()

     def heads(self):
         return self._repo.heads()

     def known(self, nodes):
         return self._repo.known(nodes)

     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                   **kwargs):
         cg = exchange.getbundle(self._repo, source, heads=heads,
                                 common=common, bundlecaps=bundlecaps, **kwargs)
         if bundlecaps is not None and 'HG20' in bundlecaps:
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             cg = bundle2.getunbundler(self.ui, cg)
         return cg

     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.

     def unbundle(self, cg, heads, url):
         """apply a bundle on a repo

         This function handles the repo locking itself."""
         try:
             try:
                 cg = exchange.readbundle(self.ui, cg, None)
                 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                 if util.safehasattr(ret, 'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(_('push failed:'), str(exc))

     def lock(self):
         return self._repo.lock()

     def addchangegroup(self, cg, source, url):
         return changegroup.addchangegroup(self._repo, cg, source, url)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         '''used to test argument passing over the wire'''
         return "%s %s %s %s %s" % (one, two, three, four, five)

 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''

     def __init__(self, repo):
         localpeer.__init__(self, repo, caps=legacycaps)

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def between(self, pairs):
         return self._repo.between(pairs)

     def changegroup(self, basenodes, source):
         return changegroup.changegroup(self._repo, basenodes, source)

     def changegroupsubset(self, bases, heads, source):
         return changegroup.changegroupsubset(self._repo, bases, heads, source)

 class localrepository(object):

     supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                             'manifestv2'))
     _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                              'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
     filtername = None

     # a list of (ui, featureset) functions.
     # only functions defined in module of enabled extensions are invoked
     featuresetupfuncs = set()

     def _baserequirements(self, create):
         return ['revlogv1']

     def __init__(self, baseui, path=None, create=False):
         self.requirements = set()
         self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
         self.wopener = self.wvfs
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = pathutil.pathauditor(self.root, self._checknested)
         self.vfs = scmutil.vfs(self.path)
         self.opener = self.vfs
         self.baseui = baseui
         self.ui = baseui.copy()
         self.ui.copy = baseui.copy # prevent copying repo configuration
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass

         if self.featuresetupfuncs:
             self.supported = set(self._basesupported) # use private copy
             extmods = set(m.__name__ for n, m
                           in extensions.extensions(self.ui))
             for setupfunc in self.featuresetupfuncs:
                 if setupfunc.__module__ in extmods:
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported

         if not self.vfs.isdir():
             if create:
                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)
                 self.requirements.update(self._baserequirements(create))
                 if self.ui.configbool('format', 'usestore', True):
                     self.vfs.mkdir("store")
                     self.requirements.add("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         self.requirements.add("fncache")
                         if self.ui.configbool('format', 'dotencode', True):
                             self.requirements.add('dotencode')
                 # create an invalid changelog
                 self.vfs.append(
                     "00changelog.i",
                     '\0\0\0\2' # represents revlogv2
                     ' dummy changelog to prevent using the old repo layout'
                 )
                 # experimental config: format.generaldelta
                 if self.ui.configbool('format', 'generaldelta', False):
                     self.requirements.add("generaldelta")
                 if self.ui.configbool('experimental', 'treemanifest', False):
                     self.requirements.add("treemanifest")
                 if self.ui.configbool('experimental', 'manifestv2', False):
                     self.requirements.add("manifestv2")
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 self.requirements = scmutil.readrequires(
                     self.vfs, self.supported)
             except IOError as inst:
                 if inst.errno != errno.ENOENT:
                     raise

         self.sharedpath = self.path
         try:
             vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                               realpath=True)
             s = vfs.base
             if not vfs.exists():
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise

         self.store = store.store(
             self.requirements, self.sharedpath, scmutil.vfs)
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self._applyopenerreqs()
         if create:
             self._writerequirements()


         self._branchcaches = {}
         self._revbranchcache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}

         # hold sets of revision to be filtered
         # should be cleared when something might have changed the filter value:
         # - new changesets,
         # - phase change,
         # - new obsolescence marker,
         # - working directory parent change,
         # - bookmark changes
         self.filteredrevcache = {}

         # generic mapping between names and nodes
         self.names = namespaces.namespaces()

     def close(self):
         self._writecaches()

     def _writecaches(self):
         if self._revbranchcache:
             self._revbranchcache.write()

     def _restrictcapabilities(self, caps):
         if self.ui.configbool('experimental', 'bundle2-advertise', True):
             caps = set(caps)
             capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
             caps.add('bundle2=' + urllib.quote(capsblob))
         return caps

     def _applyopenerreqs(self):
         self.svfs.options = dict((r, 1) for r in self.requirements
                                  if r in self.openerreqs)
         # experimental config: format.chunkcachesize
         chunkcachesize = self.ui.configint('format', 'chunkcachesize')
         if chunkcachesize is not None:
             self.svfs.options['chunkcachesize'] = chunkcachesize
         # experimental config: format.maxchainlen
         maxchainlen = self.ui.configint('format', 'maxchainlen')
         if maxchainlen is not None:
             self.svfs.options['maxchainlen'] = maxchainlen
         # experimental config: format.manifestcachesize
         manifestcachesize = self.ui.configint('format', 'manifestcachesize')
         if manifestcachesize is not None:
             self.svfs.options['manifestcachesize'] = manifestcachesize
+        # experimental config: format.aggressivemergedeltas
+        aggressivemergedeltas = self.ui.configbool('format',
+            'aggressivemergedeltas', False)
+        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
357
361
358 def _writerequirements(self):
362 def _writerequirements(self):
359 scmutil.writerequires(self.vfs, self.requirements)
363 scmutil.writerequires(self.vfs, self.requirements)
360
364
361 def _checknested(self, path):
365 def _checknested(self, path):
362 """Determine if path is a legal nested repository."""
366 """Determine if path is a legal nested repository."""
363 if not path.startswith(self.root):
367 if not path.startswith(self.root):
364 return False
368 return False
365 subpath = path[len(self.root) + 1:]
369 subpath = path[len(self.root) + 1:]
366 normsubpath = util.pconvert(subpath)
370 normsubpath = util.pconvert(subpath)
367
371
368 # XXX: Checking against the current working copy is wrong in
372 # XXX: Checking against the current working copy is wrong in
369 # the sense that it can reject things like
373 # the sense that it can reject things like
370 #
374 #
371 # $ hg cat -r 10 sub/x.txt
375 # $ hg cat -r 10 sub/x.txt
372 #
376 #
373 # if sub/ is no longer a subrepository in the working copy
377 # if sub/ is no longer a subrepository in the working copy
374 # parent revision.
378 # parent revision.
375 #
379 #
376 # However, it can of course also allow things that would have
380 # However, it can of course also allow things that would have
377 # been rejected before, such as the above cat command if sub/
381 # been rejected before, such as the above cat command if sub/
378 # is a subrepository now, but was a normal directory before.
382 # is a subrepository now, but was a normal directory before.
379 # The old path auditor would have rejected by mistake since it
383 # The old path auditor would have rejected by mistake since it
380 # panics when it sees sub/.hg/.
384 # panics when it sees sub/.hg/.
381 #
385 #
382 # All in all, checking against the working copy seems sensible
386 # All in all, checking against the working copy seems sensible
383 # since we want to prevent access to nested repositories on
387 # since we want to prevent access to nested repositories on
384 # the filesystem *now*.
388 # the filesystem *now*.
385 ctx = self[None]
389 ctx = self[None]
386 parts = util.splitpath(subpath)
390 parts = util.splitpath(subpath)
387 while parts:
391 while parts:
388 prefix = '/'.join(parts)
392 prefix = '/'.join(parts)
389 if prefix in ctx.substate:
393 if prefix in ctx.substate:
390 if prefix == normsubpath:
394 if prefix == normsubpath:
391 return True
395 return True
392 else:
396 else:
393 sub = ctx.sub(prefix)
397 sub = ctx.sub(prefix)
394 return sub.checknested(subpath[len(prefix) + 1:])
398 return sub.checknested(subpath[len(prefix) + 1:])
395 else:
399 else:
396 parts.pop()
400 parts.pop()
397 return False
401 return False
398
402
399 def peer(self):
403 def peer(self):
400 return localpeer(self) # not cached to avoid reference cycle
404 return localpeer(self) # not cached to avoid reference cycle
401
405
402 def unfiltered(self):
406 def unfiltered(self):
403 """Return unfiltered version of the repository
407 """Return unfiltered version of the repository
404
408
405 Intended to be overwritten by filtered repo."""
409 Intended to be overwritten by filtered repo."""
406 return self
410 return self
407
411
408 def filtered(self, name):
412 def filtered(self, name):
409 """Return a filtered version of a repository"""
413 """Return a filtered version of a repository"""
410 # build a new class with the mixin and the current class
414 # build a new class with the mixin and the current class
411 # (possibly subclass of the repo)
415 # (possibly subclass of the repo)
412 class proxycls(repoview.repoview, self.unfiltered().__class__):
416 class proxycls(repoview.repoview, self.unfiltered().__class__):
413 pass
417 pass
414 return proxycls(self, name)
418 return proxycls(self, name)
415
419
416 @repofilecache('bookmarks')
420 @repofilecache('bookmarks')
417 def _bookmarks(self):
421 def _bookmarks(self):
418 return bookmarks.bmstore(self)
422 return bookmarks.bmstore(self)
419
423
420 @repofilecache('bookmarks.current')
424 @repofilecache('bookmarks.current')
421 def _activebookmark(self):
425 def _activebookmark(self):
422 return bookmarks.readactive(self)
426 return bookmarks.readactive(self)
423
427
424 def bookmarkheads(self, bookmark):
428 def bookmarkheads(self, bookmark):
425 name = bookmark.split('@', 1)[0]
429 name = bookmark.split('@', 1)[0]
426 heads = []
430 heads = []
427 for mark, n in self._bookmarks.iteritems():
431 for mark, n in self._bookmarks.iteritems():
428 if mark.split('@', 1)[0] == name:
432 if mark.split('@', 1)[0] == name:
429 heads.append(n)
433 heads.append(n)
430 return heads
434 return heads
431
435
432 @storecache('phaseroots')
436 @storecache('phaseroots')
433 def _phasecache(self):
437 def _phasecache(self):
434 return phases.phasecache(self, self._phasedefaults)
438 return phases.phasecache(self, self._phasedefaults)
435
439
436 @storecache('obsstore')
440 @storecache('obsstore')
437 def obsstore(self):
441 def obsstore(self):
438 # read default format for new obsstore.
442 # read default format for new obsstore.
439 # developer config: format.obsstore-version
443 # developer config: format.obsstore-version
440 defaultformat = self.ui.configint('format', 'obsstore-version', None)
444 defaultformat = self.ui.configint('format', 'obsstore-version', None)
441 # rely on obsstore class default when possible.
445 # rely on obsstore class default when possible.
442 kwargs = {}
446 kwargs = {}
443 if defaultformat is not None:
447 if defaultformat is not None:
444 kwargs['defaultformat'] = defaultformat
448 kwargs['defaultformat'] = defaultformat
445 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
449 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
446 store = obsolete.obsstore(self.svfs, readonly=readonly,
450 store = obsolete.obsstore(self.svfs, readonly=readonly,
447 **kwargs)
451 **kwargs)
448 if store and readonly:
452 if store and readonly:
449 self.ui.warn(
453 self.ui.warn(
450 _('obsolete feature not enabled but %i markers found!\n')
454 _('obsolete feature not enabled but %i markers found!\n')
451 % len(list(store)))
455 % len(list(store)))
452 return store
456 return store
453
457
454 @storecache('00changelog.i')
458 @storecache('00changelog.i')
455 def changelog(self):
459 def changelog(self):
456 c = changelog.changelog(self.svfs)
460 c = changelog.changelog(self.svfs)
457 if 'HG_PENDING' in os.environ:
461 if 'HG_PENDING' in os.environ:
458 p = os.environ['HG_PENDING']
462 p = os.environ['HG_PENDING']
459 if p.startswith(self.root):
463 if p.startswith(self.root):
460 c.readpending('00changelog.i.a')
464 c.readpending('00changelog.i.a')
461 return c
465 return c
462
466
463 @storecache('00manifest.i')
467 @storecache('00manifest.i')
464 def manifest(self):
468 def manifest(self):
465 return manifest.manifest(self.svfs)
469 return manifest.manifest(self.svfs)
466
470
467 def dirlog(self, dir):
471 def dirlog(self, dir):
468 return self.manifest.dirlog(dir)
472 return self.manifest.dirlog(dir)
469
473
470 @repofilecache('dirstate')
474 @repofilecache('dirstate')
471 def dirstate(self):
475 def dirstate(self):
472 warned = [0]
476 warned = [0]
473 def validate(node):
477 def validate(node):
474 try:
478 try:
475 self.changelog.rev(node)
479 self.changelog.rev(node)
476 return node
480 return node
477 except error.LookupError:
481 except error.LookupError:
478 if not warned[0]:
482 if not warned[0]:
479 warned[0] = True
483 warned[0] = True
480 self.ui.warn(_("warning: ignoring unknown"
484 self.ui.warn(_("warning: ignoring unknown"
481 " working parent %s!\n") % short(node))
485 " working parent %s!\n") % short(node))
482 return nullid
486 return nullid
483
487
484 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
488 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
485
489
486 def __getitem__(self, changeid):
490 def __getitem__(self, changeid):
487 if changeid is None or changeid == wdirrev:
491 if changeid is None or changeid == wdirrev:
488 return context.workingctx(self)
492 return context.workingctx(self)
489 if isinstance(changeid, slice):
493 if isinstance(changeid, slice):
490 return [context.changectx(self, i)
494 return [context.changectx(self, i)
491 for i in xrange(*changeid.indices(len(self)))
495 for i in xrange(*changeid.indices(len(self)))
492 if i not in self.changelog.filteredrevs]
496 if i not in self.changelog.filteredrevs]
493 return context.changectx(self, changeid)
497 return context.changectx(self, changeid)
494
498
495 def __contains__(self, changeid):
499 def __contains__(self, changeid):
496 try:
500 try:
497 self[changeid]
501 self[changeid]
498 return True
502 return True
499 except error.RepoLookupError:
503 except error.RepoLookupError:
500 return False
504 return False
501
505
502 def __nonzero__(self):
506 def __nonzero__(self):
503 return True
507 return True
504
508
505 def __len__(self):
509 def __len__(self):
506 return len(self.changelog)
510 return len(self.changelog)
507
511
508 def __iter__(self):
512 def __iter__(self):
509 return iter(self.changelog)
513 return iter(self.changelog)
510
514
511 def revs(self, expr, *args):
515 def revs(self, expr, *args):
512 '''Return a list of revisions matching the given revset'''
516 '''Return a list of revisions matching the given revset'''
513 expr = revset.formatspec(expr, *args)
517 expr = revset.formatspec(expr, *args)
514 m = revset.match(None, expr)
518 m = revset.match(None, expr)
515 return m(self)
519 return m(self)
516
520
517 def set(self, expr, *args):
521 def set(self, expr, *args):
518 '''
522 '''
519 Yield a context for each matching revision, after doing arg
523 Yield a context for each matching revision, after doing arg
520 replacement via revset.formatspec
524 replacement via revset.formatspec
521 '''
525 '''
522 for r in self.revs(expr, *args):
526 for r in self.revs(expr, *args):
523 yield self[r]
527 yield self[r]
524
528
525 def url(self):
529 def url(self):
526 return 'file:' + self.root
530 return 'file:' + self.root
527
531
528 def hook(self, name, throw=False, **args):
532 def hook(self, name, throw=False, **args):
529 """Call a hook, passing this repo instance.
533 """Call a hook, passing this repo instance.
530
534
531 This a convenience method to aid invoking hooks. Extensions likely
535 This a convenience method to aid invoking hooks. Extensions likely
532 won't call this unless they have registered a custom hook or are
536 won't call this unless they have registered a custom hook or are
533 replacing code that is expected to call a hook.
537 replacing code that is expected to call a hook.
534 """
538 """
535 return hook.hook(self.ui, self, name, throw, **args)
539 return hook.hook(self.ui, self, name, throw, **args)
536
540
537 @unfilteredmethod
541 @unfilteredmethod
538 def _tag(self, names, node, message, local, user, date, extra={},
542 def _tag(self, names, node, message, local, user, date, extra={},
539 editor=False):
543 editor=False):
540 if isinstance(names, str):
544 if isinstance(names, str):
541 names = (names,)
545 names = (names,)
542
546
543 branches = self.branchmap()
547 branches = self.branchmap()
544 for name in names:
548 for name in names:
545 self.hook('pretag', throw=True, node=hex(node), tag=name,
549 self.hook('pretag', throw=True, node=hex(node), tag=name,
546 local=local)
550 local=local)
547 if name in branches:
551 if name in branches:
548 self.ui.warn(_("warning: tag %s conflicts with existing"
552 self.ui.warn(_("warning: tag %s conflicts with existing"
549 " branch name\n") % name)
553 " branch name\n") % name)
550
554
551 def writetags(fp, names, munge, prevtags):
555 def writetags(fp, names, munge, prevtags):
552 fp.seek(0, 2)
556 fp.seek(0, 2)
553 if prevtags and prevtags[-1] != '\n':
557 if prevtags and prevtags[-1] != '\n':
554 fp.write('\n')
558 fp.write('\n')
555 for name in names:
559 for name in names:
556 if munge:
560 if munge:
557 m = munge(name)
561 m = munge(name)
558 else:
562 else:
559 m = name
563 m = name
560
564
561 if (self._tagscache.tagtypes and
565 if (self._tagscache.tagtypes and
562 name in self._tagscache.tagtypes):
566 name in self._tagscache.tagtypes):
563 old = self.tags().get(name, nullid)
567 old = self.tags().get(name, nullid)
564 fp.write('%s %s\n' % (hex(old), m))
568 fp.write('%s %s\n' % (hex(old), m))
565 fp.write('%s %s\n' % (hex(node), m))
569 fp.write('%s %s\n' % (hex(node), m))
566 fp.close()
570 fp.close()
567
571
568 prevtags = ''
572 prevtags = ''
569 if local:
573 if local:
570 try:
574 try:
571 fp = self.vfs('localtags', 'r+')
575 fp = self.vfs('localtags', 'r+')
572 except IOError:
576 except IOError:
573 fp = self.vfs('localtags', 'a')
577 fp = self.vfs('localtags', 'a')
574 else:
578 else:
575 prevtags = fp.read()
579 prevtags = fp.read()
576
580
577 # local tags are stored in the current charset
581 # local tags are stored in the current charset
578 writetags(fp, names, None, prevtags)
582 writetags(fp, names, None, prevtags)
579 for name in names:
583 for name in names:
580 self.hook('tag', node=hex(node), tag=name, local=local)
584 self.hook('tag', node=hex(node), tag=name, local=local)
581 return
585 return
582
586
583 try:
587 try:
584 fp = self.wfile('.hgtags', 'rb+')
588 fp = self.wfile('.hgtags', 'rb+')
585 except IOError as e:
589 except IOError as e:
586 if e.errno != errno.ENOENT:
590 if e.errno != errno.ENOENT:
587 raise
591 raise
588 fp = self.wfile('.hgtags', 'ab')
592 fp = self.wfile('.hgtags', 'ab')
589 else:
593 else:
590 prevtags = fp.read()
594 prevtags = fp.read()
591
595
592 # committed tags are stored in UTF-8
596 # committed tags are stored in UTF-8
593 writetags(fp, names, encoding.fromlocal, prevtags)
597 writetags(fp, names, encoding.fromlocal, prevtags)
594
598
595 fp.close()
599 fp.close()
596
600
597 self.invalidatecaches()
601 self.invalidatecaches()
598
602
599 if '.hgtags' not in self.dirstate:
603 if '.hgtags' not in self.dirstate:
600 self[None].add(['.hgtags'])
604 self[None].add(['.hgtags'])
601
605
602 m = matchmod.exact(self.root, '', ['.hgtags'])
606 m = matchmod.exact(self.root, '', ['.hgtags'])
603 tagnode = self.commit(message, user, date, extra=extra, match=m,
607 tagnode = self.commit(message, user, date, extra=extra, match=m,
604 editor=editor)
608 editor=editor)
605
609
606 for name in names:
610 for name in names:
607 self.hook('tag', node=hex(node), tag=name, local=local)
611 self.hook('tag', node=hex(node), tag=name, local=local)
608
612
609 return tagnode
613 return tagnode
610
614
611 def tag(self, names, node, message, local, user, date, editor=False):
615 def tag(self, names, node, message, local, user, date, editor=False):
612 '''tag a revision with one or more symbolic names.
616 '''tag a revision with one or more symbolic names.
613
617
614 names is a list of strings or, when adding a single tag, names may be a
618 names is a list of strings or, when adding a single tag, names may be a
615 string.
619 string.
616
620
617 if local is True, the tags are stored in a per-repository file.
621 if local is True, the tags are stored in a per-repository file.
618 otherwise, they are stored in the .hgtags file, and a new
622 otherwise, they are stored in the .hgtags file, and a new
619 changeset is committed with the change.
623 changeset is committed with the change.
620
624
621 keyword arguments:
625 keyword arguments:
622
626
623 local: whether to store tags in non-version-controlled file
627 local: whether to store tags in non-version-controlled file
624 (default False)
628 (default False)
625
629
626 message: commit message to use if committing
630 message: commit message to use if committing
627
631
628 user: name of user to use if committing
632 user: name of user to use if committing
629
633
630 date: date tuple to use if committing'''
634 date: date tuple to use if committing'''
631
635
632 if not local:
636 if not local:
633 m = matchmod.exact(self.root, '', ['.hgtags'])
637 m = matchmod.exact(self.root, '', ['.hgtags'])
634 if any(self.status(match=m, unknown=True, ignored=True)):
638 if any(self.status(match=m, unknown=True, ignored=True)):
635 raise util.Abort(_('working copy of .hgtags is changed'),
639 raise util.Abort(_('working copy of .hgtags is changed'),
636 hint=_('please commit .hgtags manually'))
640 hint=_('please commit .hgtags manually'))
637
641
638 self.tags() # instantiate the cache
642 self.tags() # instantiate the cache
639 self._tag(names, node, message, local, user, date, editor=editor)
643 self._tag(names, node, message, local, user, date, editor=editor)
640
644
641 @filteredpropertycache
645 @filteredpropertycache
642 def _tagscache(self):
646 def _tagscache(self):
643 '''Returns a tagscache object that contains various tags related
647 '''Returns a tagscache object that contains various tags related
644 caches.'''
648 caches.'''
645
649
646 # This simplifies its cache management by having one decorated
650 # This simplifies its cache management by having one decorated
647 # function (this one) and the rest simply fetch things from it.
651 # function (this one) and the rest simply fetch things from it.
648 class tagscache(object):
652 class tagscache(object):
649 def __init__(self):
653 def __init__(self):
650 # These two define the set of tags for this repository. tags
654 # These two define the set of tags for this repository. tags
651 # maps tag name to node; tagtypes maps tag name to 'global' or
655 # maps tag name to node; tagtypes maps tag name to 'global' or
652 # 'local'. (Global tags are defined by .hgtags across all
656 # 'local'. (Global tags are defined by .hgtags across all
653 # heads, and local tags are defined in .hg/localtags.)
657 # heads, and local tags are defined in .hg/localtags.)
654 # They constitute the in-memory cache of tags.
658 # They constitute the in-memory cache of tags.
655 self.tags = self.tagtypes = None
659 self.tags = self.tagtypes = None
656
660
657 self.nodetagscache = self.tagslist = None
661 self.nodetagscache = self.tagslist = None
658
662
659 cache = tagscache()
663 cache = tagscache()
660 cache.tags, cache.tagtypes = self._findtags()
664 cache.tags, cache.tagtypes = self._findtags()
661
665
662 return cache
666 return cache
663
667
664 def tags(self):
668 def tags(self):
665 '''return a mapping of tag to node'''
669 '''return a mapping of tag to node'''
666 t = {}
670 t = {}
667 if self.changelog.filteredrevs:
671 if self.changelog.filteredrevs:
668 tags, tt = self._findtags()
672 tags, tt = self._findtags()
669 else:
673 else:
670 tags = self._tagscache.tags
674 tags = self._tagscache.tags
671 for k, v in tags.iteritems():
675 for k, v in tags.iteritems():
672 try:
676 try:
673 # ignore tags to unknown nodes
677 # ignore tags to unknown nodes
674 self.changelog.rev(v)
678 self.changelog.rev(v)
675 t[k] = v
679 t[k] = v
676 except (error.LookupError, ValueError):
680 except (error.LookupError, ValueError):
677 pass
681 pass
678 return t
682 return t
679
683
680 def _findtags(self):
684 def _findtags(self):
681 '''Do the hard work of finding tags. Return a pair of dicts
685 '''Do the hard work of finding tags. Return a pair of dicts
682 (tags, tagtypes) where tags maps tag name to node, and tagtypes
686 (tags, tagtypes) where tags maps tag name to node, and tagtypes
683 maps tag name to a string like \'global\' or \'local\'.
687 maps tag name to a string like \'global\' or \'local\'.
684 Subclasses or extensions are free to add their own tags, but
688 Subclasses or extensions are free to add their own tags, but
685 should be aware that the returned dicts will be retained for the
689 should be aware that the returned dicts will be retained for the
686 duration of the localrepo object.'''
690 duration of the localrepo object.'''
687
691
688 # XXX what tagtype should subclasses/extensions use? Currently
692 # XXX what tagtype should subclasses/extensions use? Currently
689 # mq and bookmarks add tags, but do not set the tagtype at all.
693 # mq and bookmarks add tags, but do not set the tagtype at all.
690 # Should each extension invent its own tag type? Should there
694 # Should each extension invent its own tag type? Should there
691 # be one tagtype for all such "virtual" tags? Or is the status
695 # be one tagtype for all such "virtual" tags? Or is the status
692 # quo fine?
696 # quo fine?
693
697
694 alltags = {} # map tag name to (node, hist)
698 alltags = {} # map tag name to (node, hist)
695 tagtypes = {}
699 tagtypes = {}
696
700
697 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
701 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
698 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
702 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
699
703
700 # Build the return dicts. Have to re-encode tag names because
704 # Build the return dicts. Have to re-encode tag names because
701 # the tags module always uses UTF-8 (in order not to lose info
705 # the tags module always uses UTF-8 (in order not to lose info
702 # writing to the cache), but the rest of Mercurial wants them in
706 # writing to the cache), but the rest of Mercurial wants them in
703 # local encoding.
707 # local encoding.
704 tags = {}
708 tags = {}
705 for (name, (node, hist)) in alltags.iteritems():
709 for (name, (node, hist)) in alltags.iteritems():
706 if node != nullid:
710 if node != nullid:
707 tags[encoding.tolocal(name)] = node
711 tags[encoding.tolocal(name)] = node
708 tags['tip'] = self.changelog.tip()
712 tags['tip'] = self.changelog.tip()
709 tagtypes = dict([(encoding.tolocal(name), value)
713 tagtypes = dict([(encoding.tolocal(name), value)
710 for (name, value) in tagtypes.iteritems()])
714 for (name, value) in tagtypes.iteritems()])
711 return (tags, tagtypes)
715 return (tags, tagtypes)
712
716
713 def tagtype(self, tagname):
717 def tagtype(self, tagname):
714 '''
718 '''
715 return the type of the given tag. result can be:
719 return the type of the given tag. result can be:
716
720
717 'local' : a local tag
721 'local' : a local tag
718 'global' : a global tag
722 'global' : a global tag
719 None : tag does not exist
723 None : tag does not exist
720 '''
724 '''
721
725
722 return self._tagscache.tagtypes.get(tagname)
726 return self._tagscache.tagtypes.get(tagname)
723
727
724 def tagslist(self):
728 def tagslist(self):
725 '''return a list of tags ordered by revision'''
729 '''return a list of tags ordered by revision'''
726 if not self._tagscache.tagslist:
730 if not self._tagscache.tagslist:
727 l = []
731 l = []
728 for t, n in self.tags().iteritems():
732 for t, n in self.tags().iteritems():
729 l.append((self.changelog.rev(n), t, n))
733 l.append((self.changelog.rev(n), t, n))
730 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
734 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
731
735
732 return self._tagscache.tagslist
736 return self._tagscache.tagslist
733
737
734 def nodetags(self, node):
738 def nodetags(self, node):
735 '''return the tags associated with a node'''
739 '''return the tags associated with a node'''
736 if not self._tagscache.nodetagscache:
740 if not self._tagscache.nodetagscache:
737 nodetagscache = {}
741 nodetagscache = {}
738 for t, n in self._tagscache.tags.iteritems():
742 for t, n in self._tagscache.tags.iteritems():
739 nodetagscache.setdefault(n, []).append(t)
743 nodetagscache.setdefault(n, []).append(t)
740 for tags in nodetagscache.itervalues():
744 for tags in nodetagscache.itervalues():
741 tags.sort()
745 tags.sort()
742 self._tagscache.nodetagscache = nodetagscache
746 self._tagscache.nodetagscache = nodetagscache
743 return self._tagscache.nodetagscache.get(node, [])
747 return self._tagscache.nodetagscache.get(node, [])
744
748
745 def nodebookmarks(self, node):
749 def nodebookmarks(self, node):
746 marks = []
750 marks = []
747 for bookmark, n in self._bookmarks.iteritems():
751 for bookmark, n in self._bookmarks.iteritems():
748 if n == node:
752 if n == node:
749 marks.append(bookmark)
753 marks.append(bookmark)
750 return sorted(marks)
754 return sorted(marks)
751
755
752 def branchmap(self):
756 def branchmap(self):
753 '''returns a dictionary {branch: [branchheads]} with branchheads
757 '''returns a dictionary {branch: [branchheads]} with branchheads
754 ordered by increasing revision number'''
758 ordered by increasing revision number'''
755 branchmap.updatecache(self)
759 branchmap.updatecache(self)
756 return self._branchcaches[self.filtername]
760 return self._branchcaches[self.filtername]
757
761
758 @unfilteredmethod
762 @unfilteredmethod
759 def revbranchcache(self):
763 def revbranchcache(self):
760 if not self._revbranchcache:
764 if not self._revbranchcache:
761 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
765 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
762 return self._revbranchcache
766 return self._revbranchcache
763
767
764 def branchtip(self, branch, ignoremissing=False):
768 def branchtip(self, branch, ignoremissing=False):
765 '''return the tip node for a given branch
769 '''return the tip node for a given branch
766
770
767 If ignoremissing is True, then this method will not raise an error.
771 If ignoremissing is True, then this method will not raise an error.
768 This is helpful for callers that only expect None for a missing branch
772 This is helpful for callers that only expect None for a missing branch
769 (e.g. namespace).
773 (e.g. namespace).
770
774
771 '''
775 '''
772 try:
776 try:
773 return self.branchmap().branchtip(branch)
777 return self.branchmap().branchtip(branch)
774 except KeyError:
778 except KeyError:
775 if not ignoremissing:
779 if not ignoremissing:
776 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
780 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
777 else:
781 else:
778 pass
782 pass
779
783
780 def lookup(self, key):
784 def lookup(self, key):
781 return self[key].node()
785 return self[key].node()
782
786
783 def lookupbranch(self, key, remote=None):
787 def lookupbranch(self, key, remote=None):
784 repo = remote or self
788 repo = remote or self
785 if key in repo.branchmap():
789 if key in repo.branchmap():
786 return key
790 return key
787
791
788 repo = (remote and remote.local()) and remote or self
792 repo = (remote and remote.local()) and remote or self
789 return repo[key].branch()
793 return repo[key].branch()
790
794
791 def known(self, nodes):
795 def known(self, nodes):
792 nm = self.changelog.nodemap
796 nm = self.changelog.nodemap
793 pc = self._phasecache
797 pc = self._phasecache
794 result = []
798 result = []
795 for n in nodes:
799 for n in nodes:
796 r = nm.get(n)
800 r = nm.get(n)
797 resp = not (r is None or pc.phase(self, r) >= phases.secret)
801 resp = not (r is None or pc.phase(self, r) >= phases.secret)
798 result.append(resp)
802 result.append(resp)
799 return result
803 return result
800
804
801 def local(self):
805 def local(self):
802 return self
806 return self
803
807
804 def publishing(self):
808 def publishing(self):
805 # it's safe (and desirable) to trust the publish flag unconditionally
809 # it's safe (and desirable) to trust the publish flag unconditionally
806 # so that we don't finalize changes shared between users via ssh or nfs
810 # so that we don't finalize changes shared between users via ssh or nfs
807 return self.ui.configbool('phases', 'publish', True, untrusted=True)
811 return self.ui.configbool('phases', 'publish', True, untrusted=True)
808
812
809 def cancopy(self):
813 def cancopy(self):
810 # so statichttprepo's override of local() works
814 # so statichttprepo's override of local() works
811 if not self.local():
815 if not self.local():
812 return False
816 return False
813 if not self.publishing():
817 if not self.publishing():
814 return True
818 return True
815 # if publishing we can't copy if there is filtered content
819 # if publishing we can't copy if there is filtered content
816 return not self.filtered('visible').changelog.filteredrevs
820 return not self.filtered('visible').changelog.filteredrevs
817
821
818 def shared(self):
822 def shared(self):
819 '''the type of shared repository (None if not shared)'''
823 '''the type of shared repository (None if not shared)'''
820 if self.sharedpath != self.path:
824 if self.sharedpath != self.path:
821 return 'store'
825 return 'store'
822 return None
826 return None
823
827
824 def join(self, f, *insidef):
828 def join(self, f, *insidef):
825 return self.vfs.join(os.path.join(f, *insidef))
829 return self.vfs.join(os.path.join(f, *insidef))
826
830
827 def wjoin(self, f, *insidef):
831 def wjoin(self, f, *insidef):
828 return self.vfs.reljoin(self.root, f, *insidef)
832 return self.vfs.reljoin(self.root, f, *insidef)
829
833
830 def file(self, f):
834 def file(self, f):
831 if f[0] == '/':
835 if f[0] == '/':
832 f = f[1:]
836 f = f[1:]
833 return filelog.filelog(self.svfs, f)
837 return filelog.filelog(self.svfs, f)
834
838
835 def changectx(self, changeid):
839 def changectx(self, changeid):
836 return self[changeid]
840 return self[changeid]
837
841
838 def parents(self, changeid=None):
842 def parents(self, changeid=None):
839 '''get list of changectxs for parents of changeid'''
843 '''get list of changectxs for parents of changeid'''
840 return self[changeid].parents()
844 return self[changeid].parents()
841
845
842 def setparents(self, p1, p2=nullid):
846 def setparents(self, p1, p2=nullid):
843 self.dirstate.beginparentchange()
847 self.dirstate.beginparentchange()
844 copies = self.dirstate.setparents(p1, p2)
848 copies = self.dirstate.setparents(p1, p2)
845 pctx = self[p1]
849 pctx = self[p1]
846 if copies:
850 if copies:
847 # Adjust copy records, the dirstate cannot do it, it
851 # Adjust copy records, the dirstate cannot do it, it
848 # requires access to parents manifests. Preserve them
852 # requires access to parents manifests. Preserve them
849 # only for entries added to first parent.
853 # only for entries added to first parent.
850 for f in copies:
854 for f in copies:
851 if f not in pctx and copies[f] in pctx:
855 if f not in pctx and copies[f] in pctx:
852 self.dirstate.copy(copies[f], f)
856 self.dirstate.copy(copies[f], f)
853 if p2 == nullid:
857 if p2 == nullid:
854 for f, s in sorted(self.dirstate.copies().items()):
858 for f, s in sorted(self.dirstate.copies().items()):
855 if f not in pctx and s not in pctx:
859 if f not in pctx and s not in pctx:
856 self.dirstate.copy(None, f)
860 self.dirstate.copy(None, f)
857 self.dirstate.endparentchange()
861 self.dirstate.endparentchange()
858
862
859 def filectx(self, path, changeid=None, fileid=None):
863 def filectx(self, path, changeid=None, fileid=None):
860 """changeid can be a changeset revision, node, or tag.
864 """changeid can be a changeset revision, node, or tag.
861 fileid can be a file revision or node."""
865 fileid can be a file revision or node."""
862 return context.filectx(self, path, changeid, fileid)
866 return context.filectx(self, path, changeid, fileid)
863
867
864 def getcwd(self):
868 def getcwd(self):
865 return self.dirstate.getcwd()
869 return self.dirstate.getcwd()
866
870
867 def pathto(self, f, cwd=None):
871 def pathto(self, f, cwd=None):
868 return self.dirstate.pathto(f, cwd)
872 return self.dirstate.pathto(f, cwd)
869
873
870 def wfile(self, f, mode='r'):
874 def wfile(self, f, mode='r'):
871 return self.wvfs(f, mode)
875 return self.wvfs(f, mode)
872
876
873 def _link(self, f):
877 def _link(self, f):
874 return self.wvfs.islink(f)
878 return self.wvfs.islink(f)
875
879
876 def _loadfilter(self, filter):
880 def _loadfilter(self, filter):
877 if filter not in self.filterpats:
881 if filter not in self.filterpats:
878 l = []
882 l = []
879 for pat, cmd in self.ui.configitems(filter):
883 for pat, cmd in self.ui.configitems(filter):
880 if cmd == '!':
884 if cmd == '!':
881 continue
885 continue
882 mf = matchmod.match(self.root, '', [pat])
886 mf = matchmod.match(self.root, '', [pat])
883 fn = None
887 fn = None
884 params = cmd
888 params = cmd
885 for name, filterfn in self._datafilters.iteritems():
889 for name, filterfn in self._datafilters.iteritems():
886 if cmd.startswith(name):
890 if cmd.startswith(name):
887 fn = filterfn
891 fn = filterfn
888 params = cmd[len(name):].lstrip()
892 params = cmd[len(name):].lstrip()
889 break
893 break
890 if not fn:
894 if not fn:
891 fn = lambda s, c, **kwargs: util.filter(s, c)
895 fn = lambda s, c, **kwargs: util.filter(s, c)
892 # Wrap old filters not supporting keyword arguments
896 # Wrap old filters not supporting keyword arguments
893 if not inspect.getargspec(fn)[2]:
897 if not inspect.getargspec(fn)[2]:
894 oldfn = fn
898 oldfn = fn
895 fn = lambda s, c, **kwargs: oldfn(s, c)
899 fn = lambda s, c, **kwargs: oldfn(s, c)
896 l.append((mf, fn, params))
900 l.append((mf, fn, params))
897 self.filterpats[filter] = l
901 self.filterpats[filter] = l
898 return self.filterpats[filter]
902 return self.filterpats[filter]
899
903
900 def _filter(self, filterpats, filename, data):
904 def _filter(self, filterpats, filename, data):
901 for mf, fn, cmd in filterpats:
905 for mf, fn, cmd in filterpats:
902 if mf(filename):
906 if mf(filename):
903 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
907 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
904 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
908 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
905 break
909 break
906
910
907 return data
911 return data
908
912
909 @unfilteredpropertycache
913 @unfilteredpropertycache
910 def _encodefilterpats(self):
914 def _encodefilterpats(self):
911 return self._loadfilter('encode')
915 return self._loadfilter('encode')
912
916
913 @unfilteredpropertycache
917 @unfilteredpropertycache
914 def _decodefilterpats(self):
918 def _decodefilterpats(self):
915 return self._loadfilter('decode')
919 return self._loadfilter('decode')
916
920
917 def adddatafilter(self, name, filter):
921 def adddatafilter(self, name, filter):
918 self._datafilters[name] = filter
922 self._datafilters[name] = filter
919
923
920 def wread(self, filename):
924 def wread(self, filename):
921 if self._link(filename):
925 if self._link(filename):
922 data = self.wvfs.readlink(filename)
926 data = self.wvfs.readlink(filename)
923 else:
927 else:
924 data = self.wvfs.read(filename)
928 data = self.wvfs.read(filename)
925 return self._filter(self._encodefilterpats, filename, data)
929 return self._filter(self._encodefilterpats, filename, data)
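# Sketch: wread() runs the data through the [encode] filters configured in
# hgrc (and wwrite() below runs data through the [decode] filters). A
# hypothetical configuration and call:
#
#     [encode]
#     **.txt = dos2unix
#
#     data = repo.wread('notes.txt')    # piped through 'dos2unix'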
926
930
927 def wwrite(self, filename, data, flags):
931 def wwrite(self, filename, data, flags):
928 """write ``data`` into ``filename`` in the working directory
932 """write ``data`` into ``filename`` in the working directory
929
933
930 This returns the length of the written (possibly decoded) data.
934 This returns the length of the written (possibly decoded) data.
931 """
935 """
932 data = self._filter(self._decodefilterpats, filename, data)
936 data = self._filter(self._decodefilterpats, filename, data)
933 if 'l' in flags:
937 if 'l' in flags:
934 self.wvfs.symlink(data, filename)
938 self.wvfs.symlink(data, filename)
935 else:
939 else:
936 self.wvfs.write(filename, data)
940 self.wvfs.write(filename, data)
937 if 'x' in flags:
941 if 'x' in flags:
938 self.wvfs.setflags(filename, False, True)
942 self.wvfs.setflags(filename, False, True)
939 return len(data)
943 return len(data)
940
944
941 def wwritedata(self, filename, data):
945 def wwritedata(self, filename, data):
942 return self._filter(self._decodefilterpats, filename, data)
946 return self._filter(self._decodefilterpats, filename, data)
943
947
944 def currenttransaction(self):
948 def currenttransaction(self):
945 """return the current transaction or None if non exists"""
949 """return the current transaction or None if non exists"""
946 if self._transref:
950 if self._transref:
947 tr = self._transref()
951 tr = self._transref()
948 else:
952 else:
949 tr = None
953 tr = None
950
954
951 if tr and tr.running():
955 if tr and tr.running():
952 return tr
956 return tr
953 return None
957 return None
954
958
955 def transaction(self, desc, report=None):
959 def transaction(self, desc, report=None):
956 if (self.ui.configbool('devel', 'all-warnings')
960 if (self.ui.configbool('devel', 'all-warnings')
957 or self.ui.configbool('devel', 'check-locks')):
961 or self.ui.configbool('devel', 'check-locks')):
958 l = self._lockref and self._lockref()
962 l = self._lockref and self._lockref()
959 if l is None or not l.held:
963 if l is None or not l.held:
960 self.ui.develwarn('transaction with no lock')
964 self.ui.develwarn('transaction with no lock')
961 tr = self.currenttransaction()
965 tr = self.currenttransaction()
962 if tr is not None:
966 if tr is not None:
963 return tr.nest()
967 return tr.nest()
964
968
965 # abort here if the journal already exists
969 # abort here if the journal already exists
966 if self.svfs.exists("journal"):
970 if self.svfs.exists("journal"):
967 raise error.RepoError(
971 raise error.RepoError(
968 _("abandoned transaction found"),
972 _("abandoned transaction found"),
969 hint=_("run 'hg recover' to clean up transaction"))
973 hint=_("run 'hg recover' to clean up transaction"))
970
974
971 # make journal.dirstate contain in-memory changes at this point
975 # make journal.dirstate contain in-memory changes at this point
972 self.dirstate.write()
976 self.dirstate.write()
973
977
974 idbase = "%.40f#%f" % (random.random(), time.time())
978 idbase = "%.40f#%f" % (random.random(), time.time())
975 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
979 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
976 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
980 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
977
981
978 self._writejournal(desc)
982 self._writejournal(desc)
979 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
983 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
980 if report:
984 if report:
981 rp = report
985 rp = report
982 else:
986 else:
983 rp = self.ui.warn
987 rp = self.ui.warn
984 vfsmap = {'plain': self.vfs} # root of .hg/
988 vfsmap = {'plain': self.vfs} # root of .hg/
985 # we must avoid a cyclic reference between repo and transaction.
989 # we must avoid a cyclic reference between repo and transaction.
986 reporef = weakref.ref(self)
990 reporef = weakref.ref(self)
987 def validate(tr):
991 def validate(tr):
988 """will run pre-closing hooks"""
992 """will run pre-closing hooks"""
989 pending = lambda: tr.writepending() and self.root or ""
993 pending = lambda: tr.writepending() and self.root or ""
990 reporef().hook('pretxnclose', throw=True, pending=pending,
994 reporef().hook('pretxnclose', throw=True, pending=pending,
991 txnname=desc, **tr.hookargs)
995 txnname=desc, **tr.hookargs)
992
996
993 tr = transaction.transaction(rp, self.svfs, vfsmap,
997 tr = transaction.transaction(rp, self.svfs, vfsmap,
994 "journal",
998 "journal",
995 "undo",
999 "undo",
996 aftertrans(renames),
1000 aftertrans(renames),
997 self.store.createmode,
1001 self.store.createmode,
998 validator=validate)
1002 validator=validate)
999
1003
1000 tr.hookargs['txnid'] = txnid
1004 tr.hookargs['txnid'] = txnid
1001 # note: writing the fncache only during finalize means that the file is
1005 # note: writing the fncache only during finalize means that the file is
1002 # outdated when running hooks. As fncache is used for streaming clone,
1006 # outdated when running hooks. As fncache is used for streaming clone,
1003 # this is not expected to break anything that happens during the hooks.
1007 # this is not expected to break anything that happens during the hooks.
1004 tr.addfinalize('flush-fncache', self.store.write)
1008 tr.addfinalize('flush-fncache', self.store.write)
1005 def txnclosehook(tr2):
1009 def txnclosehook(tr2):
1006 """To be run if transaction is successful, will schedule a hook run
1010 """To be run if transaction is successful, will schedule a hook run
1007 """
1011 """
1008 def hook():
1012 def hook():
1009 reporef().hook('txnclose', throw=False, txnname=desc,
1013 reporef().hook('txnclose', throw=False, txnname=desc,
1010 **tr2.hookargs)
1014 **tr2.hookargs)
1011 reporef()._afterlock(hook)
1015 reporef()._afterlock(hook)
1012 tr.addfinalize('txnclose-hook', txnclosehook)
1016 tr.addfinalize('txnclose-hook', txnclosehook)
1013 def txnaborthook(tr2):
1017 def txnaborthook(tr2):
1014 """To be run if transaction is aborted
1018 """To be run if transaction is aborted
1015 """
1019 """
1016 reporef().hook('txnabort', throw=False, txnname=desc,
1020 reporef().hook('txnabort', throw=False, txnname=desc,
1017 **tr2.hookargs)
1021 **tr2.hookargs)
1018 tr.addabort('txnabort-hook', txnaborthook)
1022 tr.addabort('txnabort-hook', txnaborthook)
1019 self._transref = weakref.ref(tr)
1023 self._transref = weakref.ref(tr)
1020 return tr
1024 return tr
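# A minimal usage sketch (assuming 'repo' is a localrepository and the store
# lock is already held); commitctx() below drives a transaction the same way:
#
#     tr = repo.transaction('my-operation')
#     try:
#         ...  # write store data through the transaction
#         tr.close()
#     finally:
#         tr.release()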
1021
1025
1022 def _journalfiles(self):
1026 def _journalfiles(self):
1023 return ((self.svfs, 'journal'),
1027 return ((self.svfs, 'journal'),
1024 (self.vfs, 'journal.dirstate'),
1028 (self.vfs, 'journal.dirstate'),
1025 (self.vfs, 'journal.branch'),
1029 (self.vfs, 'journal.branch'),
1026 (self.vfs, 'journal.desc'),
1030 (self.vfs, 'journal.desc'),
1027 (self.vfs, 'journal.bookmarks'),
1031 (self.vfs, 'journal.bookmarks'),
1028 (self.svfs, 'journal.phaseroots'))
1032 (self.svfs, 'journal.phaseroots'))
1029
1033
1030 def undofiles(self):
1034 def undofiles(self):
1031 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1035 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1032
1036
1033 def _writejournal(self, desc):
1037 def _writejournal(self, desc):
1034 self.vfs.write("journal.dirstate",
1038 self.vfs.write("journal.dirstate",
1035 self.vfs.tryread("dirstate"))
1039 self.vfs.tryread("dirstate"))
1036 self.vfs.write("journal.branch",
1040 self.vfs.write("journal.branch",
1037 encoding.fromlocal(self.dirstate.branch()))
1041 encoding.fromlocal(self.dirstate.branch()))
1038 self.vfs.write("journal.desc",
1042 self.vfs.write("journal.desc",
1039 "%d\n%s\n" % (len(self), desc))
1043 "%d\n%s\n" % (len(self), desc))
1040 self.vfs.write("journal.bookmarks",
1044 self.vfs.write("journal.bookmarks",
1041 self.vfs.tryread("bookmarks"))
1045 self.vfs.tryread("bookmarks"))
1042 self.svfs.write("journal.phaseroots",
1046 self.svfs.write("journal.phaseroots",
1043 self.svfs.tryread("phaseroots"))
1047 self.svfs.tryread("phaseroots"))
1044
1048
1045 def recover(self):
1049 def recover(self):
1046 lock = self.lock()
1050 lock = self.lock()
1047 try:
1051 try:
1048 if self.svfs.exists("journal"):
1052 if self.svfs.exists("journal"):
1049 self.ui.status(_("rolling back interrupted transaction\n"))
1053 self.ui.status(_("rolling back interrupted transaction\n"))
1050 vfsmap = {'': self.svfs,
1054 vfsmap = {'': self.svfs,
1051 'plain': self.vfs,}
1055 'plain': self.vfs,}
1052 transaction.rollback(self.svfs, vfsmap, "journal",
1056 transaction.rollback(self.svfs, vfsmap, "journal",
1053 self.ui.warn)
1057 self.ui.warn)
1054 self.invalidate()
1058 self.invalidate()
1055 return True
1059 return True
1056 else:
1060 else:
1057 self.ui.warn(_("no interrupted transaction available\n"))
1061 self.ui.warn(_("no interrupted transaction available\n"))
1058 return False
1062 return False
1059 finally:
1063 finally:
1060 lock.release()
1064 lock.release()
1061
1065
1062 def rollback(self, dryrun=False, force=False):
1066 def rollback(self, dryrun=False, force=False):
1063 wlock = lock = None
1067 wlock = lock = None
1064 try:
1068 try:
1065 wlock = self.wlock()
1069 wlock = self.wlock()
1066 lock = self.lock()
1070 lock = self.lock()
1067 if self.svfs.exists("undo"):
1071 if self.svfs.exists("undo"):
1068 return self._rollback(dryrun, force)
1072 return self._rollback(dryrun, force)
1069 else:
1073 else:
1070 self.ui.warn(_("no rollback information available\n"))
1074 self.ui.warn(_("no rollback information available\n"))
1071 return 1
1075 return 1
1072 finally:
1076 finally:
1073 release(lock, wlock)
1077 release(lock, wlock)
1074
1078
1075 @unfilteredmethod # Until we get smarter cache management
1079 @unfilteredmethod # Until we get smarter cache management
1076 def _rollback(self, dryrun, force):
1080 def _rollback(self, dryrun, force):
1077 ui = self.ui
1081 ui = self.ui
1078 try:
1082 try:
1079 args = self.vfs.read('undo.desc').splitlines()
1083 args = self.vfs.read('undo.desc').splitlines()
1080 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1084 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1081 if len(args) >= 3:
1085 if len(args) >= 3:
1082 detail = args[2]
1086 detail = args[2]
1083 oldtip = oldlen - 1
1087 oldtip = oldlen - 1
1084
1088
1085 if detail and ui.verbose:
1089 if detail and ui.verbose:
1086 msg = (_('repository tip rolled back to revision %s'
1090 msg = (_('repository tip rolled back to revision %s'
1087 ' (undo %s: %s)\n')
1091 ' (undo %s: %s)\n')
1088 % (oldtip, desc, detail))
1092 % (oldtip, desc, detail))
1089 else:
1093 else:
1090 msg = (_('repository tip rolled back to revision %s'
1094 msg = (_('repository tip rolled back to revision %s'
1091 ' (undo %s)\n')
1095 ' (undo %s)\n')
1092 % (oldtip, desc))
1096 % (oldtip, desc))
1093 except IOError:
1097 except IOError:
1094 msg = _('rolling back unknown transaction\n')
1098 msg = _('rolling back unknown transaction\n')
1095 desc = None
1099 desc = None
1096
1100
1097 if not force and self['.'] != self['tip'] and desc == 'commit':
1101 if not force and self['.'] != self['tip'] and desc == 'commit':
1098 raise util.Abort(
1102 raise util.Abort(
1099 _('rollback of last commit while not checked out '
1103 _('rollback of last commit while not checked out '
1100 'may lose data'), hint=_('use -f to force'))
1104 'may lose data'), hint=_('use -f to force'))
1101
1105
1102 ui.status(msg)
1106 ui.status(msg)
1103 if dryrun:
1107 if dryrun:
1104 return 0
1108 return 0
1105
1109
1106 parents = self.dirstate.parents()
1110 parents = self.dirstate.parents()
1107 self.destroying()
1111 self.destroying()
1108 vfsmap = {'plain': self.vfs, '': self.svfs}
1112 vfsmap = {'plain': self.vfs, '': self.svfs}
1109 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1113 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1110 if self.vfs.exists('undo.bookmarks'):
1114 if self.vfs.exists('undo.bookmarks'):
1111 self.vfs.rename('undo.bookmarks', 'bookmarks')
1115 self.vfs.rename('undo.bookmarks', 'bookmarks')
1112 if self.svfs.exists('undo.phaseroots'):
1116 if self.svfs.exists('undo.phaseroots'):
1113 self.svfs.rename('undo.phaseroots', 'phaseroots')
1117 self.svfs.rename('undo.phaseroots', 'phaseroots')
1114 self.invalidate()
1118 self.invalidate()
1115
1119
1116 parentgone = (parents[0] not in self.changelog.nodemap or
1120 parentgone = (parents[0] not in self.changelog.nodemap or
1117 parents[1] not in self.changelog.nodemap)
1121 parents[1] not in self.changelog.nodemap)
1118 if parentgone:
1122 if parentgone:
1119 self.vfs.rename('undo.dirstate', 'dirstate')
1123 self.vfs.rename('undo.dirstate', 'dirstate')
1120 try:
1124 try:
1121 branch = self.vfs.read('undo.branch')
1125 branch = self.vfs.read('undo.branch')
1122 self.dirstate.setbranch(encoding.tolocal(branch))
1126 self.dirstate.setbranch(encoding.tolocal(branch))
1123 except IOError:
1127 except IOError:
1124 ui.warn(_('named branch could not be reset: '
1128 ui.warn(_('named branch could not be reset: '
1125 'current branch is still \'%s\'\n')
1129 'current branch is still \'%s\'\n')
1126 % self.dirstate.branch())
1130 % self.dirstate.branch())
1127
1131
1128 self.dirstate.invalidate()
1132 self.dirstate.invalidate()
1129 parents = tuple([p.rev() for p in self.parents()])
1133 parents = tuple([p.rev() for p in self.parents()])
1130 if len(parents) > 1:
1134 if len(parents) > 1:
1131 ui.status(_('working directory now based on '
1135 ui.status(_('working directory now based on '
1132 'revisions %d and %d\n') % parents)
1136 'revisions %d and %d\n') % parents)
1133 else:
1137 else:
1134 ui.status(_('working directory now based on '
1138 ui.status(_('working directory now based on '
1135 'revision %d\n') % parents)
1139 'revision %d\n') % parents)
1136 ms = mergemod.mergestate(self)
1140 ms = mergemod.mergestate(self)
1137 ms.reset(self['.'].node())
1141 ms.reset(self['.'].node())
1138
1142
1139 # TODO: if we know which new heads may result from this rollback, pass
1143 # TODO: if we know which new heads may result from this rollback, pass
1140 # them to destroy(), which will prevent the branchhead cache from being
1144 # them to destroy(), which will prevent the branchhead cache from being
1141 # invalidated.
1145 # invalidated.
1142 self.destroyed()
1146 self.destroyed()
1143 return 0
1147 return 0
1144
1148
1145 def invalidatecaches(self):
1149 def invalidatecaches(self):
1146
1150
1147 if '_tagscache' in vars(self):
1151 if '_tagscache' in vars(self):
1148 # can't use delattr on proxy
1152 # can't use delattr on proxy
1149 del self.__dict__['_tagscache']
1153 del self.__dict__['_tagscache']
1150
1154
1151 self.unfiltered()._branchcaches.clear()
1155 self.unfiltered()._branchcaches.clear()
1152 self.invalidatevolatilesets()
1156 self.invalidatevolatilesets()
1153
1157
1154 def invalidatevolatilesets(self):
1158 def invalidatevolatilesets(self):
1155 self.filteredrevcache.clear()
1159 self.filteredrevcache.clear()
1156 obsolete.clearobscaches(self)
1160 obsolete.clearobscaches(self)
1157
1161
1158 def invalidatedirstate(self):
1162 def invalidatedirstate(self):
1159 '''Invalidates the dirstate, causing the next call to dirstate
1163 '''Invalidates the dirstate, causing the next call to dirstate
1160 to check if it was modified since the last time it was read,
1164 to check if it was modified since the last time it was read,
1161 rereading it if it has.
1165 rereading it if it has.
1162
1166
1163 This differs from dirstate.invalidate() in that it doesn't always
1167 This differs from dirstate.invalidate() in that it doesn't always
1164 reread the dirstate. Use dirstate.invalidate() if you want to
1168 reread the dirstate. Use dirstate.invalidate() if you want to
1165 explicitly read the dirstate again (i.e. restoring it to a previous
1169 explicitly read the dirstate again (i.e. restoring it to a previous
1166 known good state).'''
1170 known good state).'''
1167 if hasunfilteredcache(self, 'dirstate'):
1171 if hasunfilteredcache(self, 'dirstate'):
1168 for k in self.dirstate._filecache:
1172 for k in self.dirstate._filecache:
1169 try:
1173 try:
1170 delattr(self.dirstate, k)
1174 delattr(self.dirstate, k)
1171 except AttributeError:
1175 except AttributeError:
1172 pass
1176 pass
1173 delattr(self.unfiltered(), 'dirstate')
1177 delattr(self.unfiltered(), 'dirstate')
1174
1178
1175 def invalidate(self):
1179 def invalidate(self):
1176 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1180 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1177 for k in self._filecache:
1181 for k in self._filecache:
1178 # dirstate is invalidated separately in invalidatedirstate()
1182 # dirstate is invalidated separately in invalidatedirstate()
1179 if k == 'dirstate':
1183 if k == 'dirstate':
1180 continue
1184 continue
1181
1185
1182 try:
1186 try:
1183 delattr(unfiltered, k)
1187 delattr(unfiltered, k)
1184 except AttributeError:
1188 except AttributeError:
1185 pass
1189 pass
1186 self.invalidatecaches()
1190 self.invalidatecaches()
1187 self.store.invalidatecaches()
1191 self.store.invalidatecaches()
1188
1192
1189 def invalidateall(self):
1193 def invalidateall(self):
1190 '''Fully invalidates both store and non-store parts, causing
1194 '''Fully invalidates both store and non-store parts, causing
1191 subsequent operations to reread any outside changes.'''
1195 subsequent operations to reread any outside changes.'''
1192 # extensions should hook this to invalidate their caches
1196 # extensions should hook this to invalidate their caches
1193 self.invalidate()
1197 self.invalidate()
1194 self.invalidatedirstate()
1198 self.invalidatedirstate()
1195
1199
1196 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1200 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1197 try:
1201 try:
1198 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1202 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1199 except error.LockHeld as inst:
1203 except error.LockHeld as inst:
1200 if not wait:
1204 if not wait:
1201 raise
1205 raise
1202 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1206 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1203 (desc, inst.locker))
1207 (desc, inst.locker))
1204 # default to 600 seconds timeout
1208 # default to 600 seconds timeout
1205 l = lockmod.lock(vfs, lockname,
1209 l = lockmod.lock(vfs, lockname,
1206 int(self.ui.config("ui", "timeout", "600")),
1210 int(self.ui.config("ui", "timeout", "600")),
1207 releasefn, desc=desc)
1211 releasefn, desc=desc)
1208 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1212 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1209 if acquirefn:
1213 if acquirefn:
1210 acquirefn()
1214 acquirefn()
1211 return l
1215 return l
1212
1216
1213 def _afterlock(self, callback):
1217 def _afterlock(self, callback):
1214 """add a callback to be run when the repository is fully unlocked
1218 """add a callback to be run when the repository is fully unlocked
1215
1219
1216 The callback will be executed when the outermost lock is released
1220 The callback will be executed when the outermost lock is released
1217 (with wlock being higher level than 'lock')."""
1221 (with wlock being higher level than 'lock')."""
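# Usage sketch with a hypothetical callback; if no lock is currently held,
# the callback runs immediately:
#
#     def notify():
#         repo.hook('myextension-done', throw=False)
#     repo._afterlock(notify)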
1218 for ref in (self._wlockref, self._lockref):
1222 for ref in (self._wlockref, self._lockref):
1219 l = ref and ref()
1223 l = ref and ref()
1220 if l and l.held:
1224 if l and l.held:
1221 l.postrelease.append(callback)
1225 l.postrelease.append(callback)
1222 break
1226 break
1223 else: # no lock has been found.
1227 else: # no lock has been found.
1224 callback()
1228 callback()
1225
1229
1226 def lock(self, wait=True):
1230 def lock(self, wait=True):
1227 '''Lock the repository store (.hg/store) and return a weak reference
1231 '''Lock the repository store (.hg/store) and return a weak reference
1228 to the lock. Use this before modifying the store (e.g. committing or
1232 to the lock. Use this before modifying the store (e.g. committing or
1229 stripping). If you are opening a transaction, get a lock as well.
1233 stripping). If you are opening a transaction, get a lock as well.
1230
1234
1231 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1235 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1232 'wlock' first to avoid a dead-lock hazard.'''
1236 'wlock' first to avoid a dead-lock hazard.'''
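# Typical acquisition order, as a sketch (assuming 'repo' is a
# localrepository and using 'release' imported from lock.py at the top of
# this module):
#
#     wlock = repo.wlock()
#     lock = repo.lock()
#     try:
#         ...  # modify the working directory and the store
#     finally:
#         release(lock, wlock)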
1233 l = self._lockref and self._lockref()
1237 l = self._lockref and self._lockref()
1234 if l is not None and l.held:
1238 if l is not None and l.held:
1235 l.lock()
1239 l.lock()
1236 return l
1240 return l
1237
1241
1238 def unlock():
1242 def unlock():
1239 for k, ce in self._filecache.items():
1243 for k, ce in self._filecache.items():
1240 if k == 'dirstate' or k not in self.__dict__:
1244 if k == 'dirstate' or k not in self.__dict__:
1241 continue
1245 continue
1242 ce.refresh()
1246 ce.refresh()
1243
1247
1244 l = self._lock(self.svfs, "lock", wait, unlock,
1248 l = self._lock(self.svfs, "lock", wait, unlock,
1245 self.invalidate, _('repository %s') % self.origroot)
1249 self.invalidate, _('repository %s') % self.origroot)
1246 self._lockref = weakref.ref(l)
1250 self._lockref = weakref.ref(l)
1247 return l
1251 return l
1248
1252
1249 def wlock(self, wait=True):
1253 def wlock(self, wait=True):
1250 '''Lock the non-store parts of the repository (everything under
1254 '''Lock the non-store parts of the repository (everything under
1251 .hg except .hg/store) and return a weak reference to the lock.
1255 .hg except .hg/store) and return a weak reference to the lock.
1252
1256
1253 Use this before modifying files in .hg.
1257 Use this before modifying files in .hg.
1254
1258
1255 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1259 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1256 'wlock' first to avoid a dead-lock hazard.'''
1260 'wlock' first to avoid a dead-lock hazard.'''
1257 l = self._wlockref and self._wlockref()
1261 l = self._wlockref and self._wlockref()
1258 if l is not None and l.held:
1262 if l is not None and l.held:
1259 l.lock()
1263 l.lock()
1260 return l
1264 return l
1261
1265
1262 # We do not need to check for non-waiting lock acquisition. Such
1266 # We do not need to check for non-waiting lock acquisition. Such
1263 # an acquisition would not cause a dead-lock, as it would just fail.
1267 # an acquisition would not cause a dead-lock, as it would just fail.
1264 if wait and (self.ui.configbool('devel', 'all-warnings')
1268 if wait and (self.ui.configbool('devel', 'all-warnings')
1265 or self.ui.configbool('devel', 'check-locks')):
1269 or self.ui.configbool('devel', 'check-locks')):
1266 l = self._lockref and self._lockref()
1270 l = self._lockref and self._lockref()
1267 if l is not None and l.held:
1271 if l is not None and l.held:
1268 self.ui.develwarn('"wlock" acquired after "lock"')
1272 self.ui.develwarn('"wlock" acquired after "lock"')
1269
1273
1270 def unlock():
1274 def unlock():
1271 if self.dirstate.pendingparentchange():
1275 if self.dirstate.pendingparentchange():
1272 self.dirstate.invalidate()
1276 self.dirstate.invalidate()
1273 else:
1277 else:
1274 self.dirstate.write()
1278 self.dirstate.write()
1275
1279
1276 self._filecache['dirstate'].refresh()
1280 self._filecache['dirstate'].refresh()
1277
1281
1278 l = self._lock(self.vfs, "wlock", wait, unlock,
1282 l = self._lock(self.vfs, "wlock", wait, unlock,
1279 self.invalidatedirstate, _('working directory of %s') %
1283 self.invalidatedirstate, _('working directory of %s') %
1280 self.origroot)
1284 self.origroot)
1281 self._wlockref = weakref.ref(l)
1285 self._wlockref = weakref.ref(l)
1282 return l
1286 return l
1283
1287
1284 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1288 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1285 """
1289 """
1286 commit an individual file as part of a larger transaction
1290 commit an individual file as part of a larger transaction
1287 """
1291 """
1288
1292
1289 fname = fctx.path()
1293 fname = fctx.path()
1290 fparent1 = manifest1.get(fname, nullid)
1294 fparent1 = manifest1.get(fname, nullid)
1291 fparent2 = manifest2.get(fname, nullid)
1295 fparent2 = manifest2.get(fname, nullid)
1292 if isinstance(fctx, context.filectx):
1296 if isinstance(fctx, context.filectx):
1293 node = fctx.filenode()
1297 node = fctx.filenode()
1294 if node in [fparent1, fparent2]:
1298 if node in [fparent1, fparent2]:
1295 self.ui.debug('reusing %s filelog entry\n' % fname)
1299 self.ui.debug('reusing %s filelog entry\n' % fname)
1296 return node
1300 return node
1297
1301
1298 flog = self.file(fname)
1302 flog = self.file(fname)
1299 meta = {}
1303 meta = {}
1300 copy = fctx.renamed()
1304 copy = fctx.renamed()
1301 if copy and copy[0] != fname:
1305 if copy and copy[0] != fname:
1302 # Mark the new revision of this file as a copy of another
1306 # Mark the new revision of this file as a copy of another
1303 # file. This copy data will effectively act as a parent
1307 # file. This copy data will effectively act as a parent
1304 # of this new revision. If this is a merge, the first
1308 # of this new revision. If this is a merge, the first
1305 # parent will be the nullid (meaning "look up the copy data")
1309 # parent will be the nullid (meaning "look up the copy data")
1306 # and the second one will be the other parent. For example:
1310 # and the second one will be the other parent. For example:
1307 #
1311 #
1308 # 0 --- 1 --- 3 rev1 changes file foo
1312 # 0 --- 1 --- 3 rev1 changes file foo
1309 # \ / rev2 renames foo to bar and changes it
1313 # \ / rev2 renames foo to bar and changes it
1310 # \- 2 -/ rev3 should have bar with all changes and
1314 # \- 2 -/ rev3 should have bar with all changes and
1311 # should record that bar descends from
1315 # should record that bar descends from
1312 # bar in rev2 and foo in rev1
1316 # bar in rev2 and foo in rev1
1313 #
1317 #
1314 # this allows this merge to succeed:
1318 # this allows this merge to succeed:
1315 #
1319 #
1316 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1320 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1317 # \ / merging rev3 and rev4 should use bar@rev2
1321 # \ / merging rev3 and rev4 should use bar@rev2
1318 # \- 2 --- 4 as the merge base
1322 # \- 2 --- 4 as the merge base
1319 #
1323 #
1320
1324
1321 cfname = copy[0]
1325 cfname = copy[0]
1322 crev = manifest1.get(cfname)
1326 crev = manifest1.get(cfname)
1323 newfparent = fparent2
1327 newfparent = fparent2
1324
1328
1325 if manifest2: # branch merge
1329 if manifest2: # branch merge
1326 if fparent2 == nullid or crev is None: # copied on remote side
1330 if fparent2 == nullid or crev is None: # copied on remote side
1327 if cfname in manifest2:
1331 if cfname in manifest2:
1328 crev = manifest2[cfname]
1332 crev = manifest2[cfname]
1329 newfparent = fparent1
1333 newfparent = fparent1
1330
1334
1331 # Here, we used to search backwards through history to try to find
1335 # Here, we used to search backwards through history to try to find
1332 # where the file copy came from if the source of a copy was not in
1336 # where the file copy came from if the source of a copy was not in
1333 # the parent directory. However, this doesn't actually make sense to
1337 # the parent directory. However, this doesn't actually make sense to
1334 # do (what does a copy from something not in your working copy even
1338 # do (what does a copy from something not in your working copy even
1335 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1339 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1336 # the user that copy information was dropped, so if they didn't
1340 # the user that copy information was dropped, so if they didn't
1337 # expect this outcome it can be fixed, but this is the correct
1341 # expect this outcome it can be fixed, but this is the correct
1338 # behavior in this circumstance.
1342 # behavior in this circumstance.
1339
1343
1340 if crev:
1344 if crev:
1341 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1345 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1342 meta["copy"] = cfname
1346 meta["copy"] = cfname
1343 meta["copyrev"] = hex(crev)
1347 meta["copyrev"] = hex(crev)
1344 fparent1, fparent2 = nullid, newfparent
1348 fparent1, fparent2 = nullid, newfparent
1345 else:
1349 else:
1346 self.ui.warn(_("warning: can't find ancestor for '%s' "
1350 self.ui.warn(_("warning: can't find ancestor for '%s' "
1347 "copied from '%s'!\n") % (fname, cfname))
1351 "copied from '%s'!\n") % (fname, cfname))
1348
1352
1349 elif fparent1 == nullid:
1353 elif fparent1 == nullid:
1350 fparent1, fparent2 = fparent2, nullid
1354 fparent1, fparent2 = fparent2, nullid
1351 elif fparent2 != nullid:
1355 elif fparent2 != nullid:
1352 # is one parent an ancestor of the other?
1356 # is one parent an ancestor of the other?
1353 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1357 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1354 if fparent1 in fparentancestors:
1358 if fparent1 in fparentancestors:
1355 fparent1, fparent2 = fparent2, nullid
1359 fparent1, fparent2 = fparent2, nullid
1356 elif fparent2 in fparentancestors:
1360 elif fparent2 in fparentancestors:
1357 fparent2 = nullid
1361 fparent2 = nullid
1358
1362
1359 # is the file changed?
1363 # is the file changed?
1360 text = fctx.data()
1364 text = fctx.data()
1361 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1365 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1362 changelist.append(fname)
1366 changelist.append(fname)
1363 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1367 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1364 # are just the flags changed during merge?
1368 # are just the flags changed during merge?
1365 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1369 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1366 changelist.append(fname)
1370 changelist.append(fname)
1367
1371
1368 return fparent1
1372 return fparent1
1369
1373
1370 @unfilteredmethod
1374 @unfilteredmethod
1371 def commit(self, text="", user=None, date=None, match=None, force=False,
1375 def commit(self, text="", user=None, date=None, match=None, force=False,
1372 editor=False, extra={}):
1376 editor=False, extra={}):
1373 """Add a new revision to current repository.
1377 """Add a new revision to current repository.
1374
1378
1375 Revision information is gathered from the working directory;
1379 Revision information is gathered from the working directory;
1376 match can be used to filter the committed files. If editor is
1380 match can be used to filter the committed files. If editor is
1377 supplied, it is called to get a commit message.
1381 supplied, it is called to get a commit message.
1378 """
1382 """
1379
1383
1380 def fail(f, msg):
1384 def fail(f, msg):
1381 raise util.Abort('%s: %s' % (f, msg))
1385 raise util.Abort('%s: %s' % (f, msg))
1382
1386
1383 if not match:
1387 if not match:
1384 match = matchmod.always(self.root, '')
1388 match = matchmod.always(self.root, '')
1385
1389
1386 if not force:
1390 if not force:
1387 vdirs = []
1391 vdirs = []
1388 match.explicitdir = vdirs.append
1392 match.explicitdir = vdirs.append
1389 match.bad = fail
1393 match.bad = fail
1390
1394
1391 wlock = self.wlock()
1395 wlock = self.wlock()
1392 try:
1396 try:
1393 wctx = self[None]
1397 wctx = self[None]
1394 merge = len(wctx.parents()) > 1
1398 merge = len(wctx.parents()) > 1
1395
1399
1396 if not force and merge and match.ispartial():
1400 if not force and merge and match.ispartial():
1397 raise util.Abort(_('cannot partially commit a merge '
1401 raise util.Abort(_('cannot partially commit a merge '
1398 '(do not specify files or patterns)'))
1402 '(do not specify files or patterns)'))
1399
1403
1400 status = self.status(match=match, clean=force)
1404 status = self.status(match=match, clean=force)
1401 if force:
1405 if force:
1402 status.modified.extend(status.clean) # mq may commit clean files
1406 status.modified.extend(status.clean) # mq may commit clean files
1403
1407
1404 # check subrepos
1408 # check subrepos
1405 subs = []
1409 subs = []
1406 commitsubs = set()
1410 commitsubs = set()
1407 newstate = wctx.substate.copy()
1411 newstate = wctx.substate.copy()
1408 # only manage subrepos and .hgsubstate if .hgsub is present
1412 # only manage subrepos and .hgsubstate if .hgsub is present
1409 if '.hgsub' in wctx:
1413 if '.hgsub' in wctx:
1410 # we'll decide whether to track this ourselves, thanks
1414 # we'll decide whether to track this ourselves, thanks
1411 for c in status.modified, status.added, status.removed:
1415 for c in status.modified, status.added, status.removed:
1412 if '.hgsubstate' in c:
1416 if '.hgsubstate' in c:
1413 c.remove('.hgsubstate')
1417 c.remove('.hgsubstate')
1414
1418
1415 # compare current state to last committed state
1419 # compare current state to last committed state
1416 # build new substate based on last committed state
1420 # build new substate based on last committed state
1417 oldstate = wctx.p1().substate
1421 oldstate = wctx.p1().substate
1418 for s in sorted(newstate.keys()):
1422 for s in sorted(newstate.keys()):
1419 if not match(s):
1423 if not match(s):
1420 # ignore working copy, use old state if present
1424 # ignore working copy, use old state if present
1421 if s in oldstate:
1425 if s in oldstate:
1422 newstate[s] = oldstate[s]
1426 newstate[s] = oldstate[s]
1423 continue
1427 continue
1424 if not force:
1428 if not force:
1425 raise util.Abort(
1429 raise util.Abort(
1426 _("commit with new subrepo %s excluded") % s)
1430 _("commit with new subrepo %s excluded") % s)
1427 dirtyreason = wctx.sub(s).dirtyreason(True)
1431 dirtyreason = wctx.sub(s).dirtyreason(True)
1428 if dirtyreason:
1432 if dirtyreason:
1429 if not self.ui.configbool('ui', 'commitsubrepos'):
1433 if not self.ui.configbool('ui', 'commitsubrepos'):
1430 raise util.Abort(dirtyreason,
1434 raise util.Abort(dirtyreason,
1431 hint=_("use --subrepos for recursive commit"))
1435 hint=_("use --subrepos for recursive commit"))
1432 subs.append(s)
1436 subs.append(s)
1433 commitsubs.add(s)
1437 commitsubs.add(s)
1434 else:
1438 else:
1435 bs = wctx.sub(s).basestate()
1439 bs = wctx.sub(s).basestate()
1436 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1440 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1437 if oldstate.get(s, (None, None, None))[1] != bs:
1441 if oldstate.get(s, (None, None, None))[1] != bs:
1438 subs.append(s)
1442 subs.append(s)
1439
1443
1440 # check for removed subrepos
1444 # check for removed subrepos
1441 for p in wctx.parents():
1445 for p in wctx.parents():
1442 r = [s for s in p.substate if s not in newstate]
1446 r = [s for s in p.substate if s not in newstate]
1443 subs += [s for s in r if match(s)]
1447 subs += [s for s in r if match(s)]
1444 if subs:
1448 if subs:
1445 if (not match('.hgsub') and
1449 if (not match('.hgsub') and
1446 '.hgsub' in (wctx.modified() + wctx.added())):
1450 '.hgsub' in (wctx.modified() + wctx.added())):
1447 raise util.Abort(
1451 raise util.Abort(
1448 _("can't commit subrepos without .hgsub"))
1452 _("can't commit subrepos without .hgsub"))
1449 status.modified.insert(0, '.hgsubstate')
1453 status.modified.insert(0, '.hgsubstate')
1450
1454
1451 elif '.hgsub' in status.removed:
1455 elif '.hgsub' in status.removed:
1452 # clean up .hgsubstate when .hgsub is removed
1456 # clean up .hgsubstate when .hgsub is removed
1453 if ('.hgsubstate' in wctx and
1457 if ('.hgsubstate' in wctx and
1454 '.hgsubstate' not in (status.modified + status.added +
1458 '.hgsubstate' not in (status.modified + status.added +
1455 status.removed)):
1459 status.removed)):
1456 status.removed.insert(0, '.hgsubstate')
1460 status.removed.insert(0, '.hgsubstate')
1457
1461
1458 # make sure all explicit patterns are matched
1462 # make sure all explicit patterns are matched
1459 if not force and (match.isexact() or match.prefix()):
1463 if not force and (match.isexact() or match.prefix()):
1460 matched = set(status.modified + status.added + status.removed)
1464 matched = set(status.modified + status.added + status.removed)
1461
1465
1462 for f in match.files():
1466 for f in match.files():
1463 f = self.dirstate.normalize(f)
1467 f = self.dirstate.normalize(f)
1464 if f == '.' or f in matched or f in wctx.substate:
1468 if f == '.' or f in matched or f in wctx.substate:
1465 continue
1469 continue
1466 if f in status.deleted:
1470 if f in status.deleted:
1467 fail(f, _('file not found!'))
1471 fail(f, _('file not found!'))
1468 if f in vdirs: # visited directory
1472 if f in vdirs: # visited directory
1469 d = f + '/'
1473 d = f + '/'
1470 for mf in matched:
1474 for mf in matched:
1471 if mf.startswith(d):
1475 if mf.startswith(d):
1472 break
1476 break
1473 else:
1477 else:
1474 fail(f, _("no match under directory!"))
1478 fail(f, _("no match under directory!"))
1475 elif f not in self.dirstate:
1479 elif f not in self.dirstate:
1476 fail(f, _("file not tracked!"))
1480 fail(f, _("file not tracked!"))
1477
1481
1478 cctx = context.workingcommitctx(self, status,
1482 cctx = context.workingcommitctx(self, status,
1479 text, user, date, extra)
1483 text, user, date, extra)
1480
1484
1481 # internal config: ui.allowemptycommit
1485 # internal config: ui.allowemptycommit
1482 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1486 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1483 or extra.get('close') or merge or cctx.files()
1487 or extra.get('close') or merge or cctx.files()
1484 or self.ui.configbool('ui', 'allowemptycommit'))
1488 or self.ui.configbool('ui', 'allowemptycommit'))
1485 if not allowemptycommit:
1489 if not allowemptycommit:
1486 return None
1490 return None
1487
1491
1488 if merge and cctx.deleted():
1492 if merge and cctx.deleted():
1489 raise util.Abort(_("cannot commit merge with missing files"))
1493 raise util.Abort(_("cannot commit merge with missing files"))
1490
1494
1491 ms = mergemod.mergestate(self)
1495 ms = mergemod.mergestate(self)
1492 for f in status.modified:
1496 for f in status.modified:
1493 if f in ms and ms[f] == 'u':
1497 if f in ms and ms[f] == 'u':
1494 raise util.Abort(_('unresolved merge conflicts '
1498 raise util.Abort(_('unresolved merge conflicts '
1495 '(see "hg help resolve")'))
1499 '(see "hg help resolve")'))
1496
1500
1497 if editor:
1501 if editor:
1498 cctx._text = editor(self, cctx, subs)
1502 cctx._text = editor(self, cctx, subs)
1499 edited = (text != cctx._text)
1503 edited = (text != cctx._text)
1500
1504
1501 # Save commit message in case this transaction gets rolled back
1505 # Save commit message in case this transaction gets rolled back
1502 # (e.g. by a pretxncommit hook). Leave the content alone on
1506 # (e.g. by a pretxncommit hook). Leave the content alone on
1503 # the assumption that the user will use the same editor again.
1507 # the assumption that the user will use the same editor again.
1504 msgfn = self.savecommitmessage(cctx._text)
1508 msgfn = self.savecommitmessage(cctx._text)
1505
1509
1506 # commit subs and write new state
1510 # commit subs and write new state
1507 if subs:
1511 if subs:
1508 for s in sorted(commitsubs):
1512 for s in sorted(commitsubs):
1509 sub = wctx.sub(s)
1513 sub = wctx.sub(s)
1510 self.ui.status(_('committing subrepository %s\n') %
1514 self.ui.status(_('committing subrepository %s\n') %
1511 subrepo.subrelpath(sub))
1515 subrepo.subrelpath(sub))
1512 sr = sub.commit(cctx._text, user, date)
1516 sr = sub.commit(cctx._text, user, date)
1513 newstate[s] = (newstate[s][0], sr)
1517 newstate[s] = (newstate[s][0], sr)
1514 subrepo.writestate(self, newstate)
1518 subrepo.writestate(self, newstate)
1515
1519
1516 p1, p2 = self.dirstate.parents()
1520 p1, p2 = self.dirstate.parents()
1517 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1521 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1518 try:
1522 try:
1519 self.hook("precommit", throw=True, parent1=hookp1,
1523 self.hook("precommit", throw=True, parent1=hookp1,
1520 parent2=hookp2)
1524 parent2=hookp2)
1521 ret = self.commitctx(cctx, True)
1525 ret = self.commitctx(cctx, True)
1522 except: # re-raises
1526 except: # re-raises
1523 if edited:
1527 if edited:
1524 self.ui.write(
1528 self.ui.write(
1525 _('note: commit message saved in %s\n') % msgfn)
1529 _('note: commit message saved in %s\n') % msgfn)
1526 raise
1530 raise
1527
1531
1528 # update bookmarks, dirstate and mergestate
1532 # update bookmarks, dirstate and mergestate
1529 bookmarks.update(self, [p1, p2], ret)
1533 bookmarks.update(self, [p1, p2], ret)
1530 cctx.markcommitted(ret)
1534 cctx.markcommitted(ret)
1531 ms.reset()
1535 ms.reset()
1532 finally:
1536 finally:
1533 wlock.release()
1537 wlock.release()
1534
1538
1535 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1539 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1536 # hack for commands that use a temporary commit (e.g. histedit):
1540 # hack for commands that use a temporary commit (e.g. histedit):
1537 # the temporary commit may have been stripped before the hook runs
1541 # the temporary commit may have been stripped before the hook runs
1538 if self.changelog.hasnode(ret):
1542 if self.changelog.hasnode(ret):
1539 self.hook("commit", node=node, parent1=parent1,
1543 self.hook("commit", node=node, parent1=parent1,
1540 parent2=parent2)
1544 parent2=parent2)
1541 self._afterlock(commithook)
1545 self._afterlock(commithook)
1542 return ret
1546 return ret
1543
1547
1544 @unfilteredmethod
1548 @unfilteredmethod
1545 def commitctx(self, ctx, error=False):
1549 def commitctx(self, ctx, error=False):
1546 """Add a new revision to current repository.
1550 """Add a new revision to current repository.
1547 Revision information is passed via the context argument.
1551 Revision information is passed via the context argument.
1548 """
1552 """
1549
1553
1550 tr = None
1554 tr = None
1551 p1, p2 = ctx.p1(), ctx.p2()
1555 p1, p2 = ctx.p1(), ctx.p2()
1552 user = ctx.user()
1556 user = ctx.user()
1553
1557
1554 lock = self.lock()
1558 lock = self.lock()
1555 try:
1559 try:
1556 tr = self.transaction("commit")
1560 tr = self.transaction("commit")
1557 trp = weakref.proxy(tr)
1561 trp = weakref.proxy(tr)
1558
1562
1559 if ctx.files():
1563 if ctx.files():
1560 m1 = p1.manifest()
1564 m1 = p1.manifest()
1561 m2 = p2.manifest()
1565 m2 = p2.manifest()
1562 m = m1.copy()
1566 m = m1.copy()
1563
1567
1564 # check in files
1568 # check in files
1565 added = []
1569 added = []
1566 changed = []
1570 changed = []
1567 removed = list(ctx.removed())
1571 removed = list(ctx.removed())
1568 linkrev = len(self)
1572 linkrev = len(self)
1569 self.ui.note(_("committing files:\n"))
1573 self.ui.note(_("committing files:\n"))
1570 for f in sorted(ctx.modified() + ctx.added()):
1574 for f in sorted(ctx.modified() + ctx.added()):
1571 self.ui.note(f + "\n")
1575 self.ui.note(f + "\n")
1572 try:
1576 try:
1573 fctx = ctx[f]
1577 fctx = ctx[f]
1574 if fctx is None:
1578 if fctx is None:
1575 removed.append(f)
1579 removed.append(f)
1576 else:
1580 else:
1577 added.append(f)
1581 added.append(f)
1578 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1582 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1579 trp, changed)
1583 trp, changed)
1580 m.setflag(f, fctx.flags())
1584 m.setflag(f, fctx.flags())
1581 except OSError as inst:
1585 except OSError as inst:
1582 self.ui.warn(_("trouble committing %s!\n") % f)
1586 self.ui.warn(_("trouble committing %s!\n") % f)
1583 raise
1587 raise
1584 except IOError as inst:
1588 except IOError as inst:
1585 errcode = getattr(inst, 'errno', errno.ENOENT)
1589 errcode = getattr(inst, 'errno', errno.ENOENT)
1586 if error or errcode and errcode != errno.ENOENT:
1590 if error or errcode and errcode != errno.ENOENT:
1587 self.ui.warn(_("trouble committing %s!\n") % f)
1591 self.ui.warn(_("trouble committing %s!\n") % f)
1588 raise
1592 raise
1589
1593
1590 # update manifest
1594 # update manifest
1591 self.ui.note(_("committing manifest\n"))
1595 self.ui.note(_("committing manifest\n"))
1592 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1596 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1593 drop = [f for f in removed if f in m]
1597 drop = [f for f in removed if f in m]
1594 for f in drop:
1598 for f in drop:
1595 del m[f]
1599 del m[f]
1596 mn = self.manifest.add(m, trp, linkrev,
1600 mn = self.manifest.add(m, trp, linkrev,
1597 p1.manifestnode(), p2.manifestnode(),
1601 p1.manifestnode(), p2.manifestnode(),
1598 added, drop)
1602 added, drop)
1599 files = changed + removed
1603 files = changed + removed
1600 else:
1604 else:
1601 mn = p1.manifestnode()
1605 mn = p1.manifestnode()
1602 files = []
1606 files = []
1603
1607
1604 # update changelog
1608 # update changelog
1605 self.ui.note(_("committing changelog\n"))
1609 self.ui.note(_("committing changelog\n"))
1606 self.changelog.delayupdate(tr)
1610 self.changelog.delayupdate(tr)
1607 n = self.changelog.add(mn, files, ctx.description(),
1611 n = self.changelog.add(mn, files, ctx.description(),
1608 trp, p1.node(), p2.node(),
1612 trp, p1.node(), p2.node(),
1609 user, ctx.date(), ctx.extra().copy())
1613 user, ctx.date(), ctx.extra().copy())
1610 p = lambda: tr.writepending() and self.root or ""
1614 p = lambda: tr.writepending() and self.root or ""
1611 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1615 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1612 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1616 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1613 parent2=xp2, pending=p)
1617 parent2=xp2, pending=p)
1614 # set the new commit in its proper phase
1618 # set the new commit in its proper phase
1615 targetphase = subrepo.newcommitphase(self.ui, ctx)
1619 targetphase = subrepo.newcommitphase(self.ui, ctx)
1616 if targetphase:
1620 if targetphase:
1617 # retracting the boundary does not alter the parent changesets.
1621 # retracting the boundary does not alter the parent changesets.
1618 # if a parent has a higher phase, the resulting phase will
1622 # if a parent has a higher phase, the resulting phase will
1619 # be compliant anyway
1623 # be compliant anyway
1620 #
1624 #
1621 # if minimal phase was 0 we don't need to retract anything
1625 # if minimal phase was 0 we don't need to retract anything
1622 phases.retractboundary(self, tr, targetphase, [n])
1626 phases.retractboundary(self, tr, targetphase, [n])
1623 tr.close()
1627 tr.close()
1624 branchmap.updatecache(self.filtered('served'))
1628 branchmap.updatecache(self.filtered('served'))
1625 return n
1629 return n
1626 finally:
1630 finally:
1627 if tr:
1631 if tr:
1628 tr.release()
1632 tr.release()
1629 lock.release()
1633 lock.release()
1630
1634
1631 @unfilteredmethod
1635 @unfilteredmethod
1632 def destroying(self):
1636 def destroying(self):
1633 '''Inform the repository that nodes are about to be destroyed.
1637 '''Inform the repository that nodes are about to be destroyed.
1634 Intended for use by strip and rollback, so there's a common
1638 Intended for use by strip and rollback, so there's a common
1635 place for anything that has to be done before destroying history.
1639 place for anything that has to be done before destroying history.
1636
1640
1637 This is mostly useful for saving state that is in memory and waiting
1641 This is mostly useful for saving state that is in memory and waiting
1638 to be flushed when the current lock is released. Because a call to
1642 to be flushed when the current lock is released. Because a call to
1639 destroyed is imminent, the repo will be invalidated causing those
1643 destroyed is imminent, the repo will be invalidated causing those
1640 changes to stay in memory (waiting for the next unlock), or vanish
1644 changes to stay in memory (waiting for the next unlock), or vanish
1641 completely.
1645 completely.
1642 '''
1646 '''
1643 # When using the same lock to commit and strip, the phasecache is left
1647 # When using the same lock to commit and strip, the phasecache is left
1644 # dirty after committing. Then when we strip, the repo is invalidated,
1648 # dirty after committing. Then when we strip, the repo is invalidated,
1645 # causing those changes to disappear.
1649 # causing those changes to disappear.
1646 if '_phasecache' in vars(self):
1650 if '_phasecache' in vars(self):
1647 self._phasecache.write()
1651 self._phasecache.write()
1648
1652
1649 @unfilteredmethod
1653 @unfilteredmethod
1650 def destroyed(self):
1654 def destroyed(self):
1651 '''Inform the repository that nodes have been destroyed.
1655 '''Inform the repository that nodes have been destroyed.
1652 Intended for use by strip and rollback, so there's a common
1656 Intended for use by strip and rollback, so there's a common
1653 place for anything that has to be done after destroying history.
1657 place for anything that has to be done after destroying history.
1654 '''
1658 '''
1655 # When one tries to:
1659 # When one tries to:
1656 # 1) destroy nodes thus calling this method (e.g. strip)
1660 # 1) destroy nodes thus calling this method (e.g. strip)
1657 # 2) use phasecache somewhere (e.g. commit)
1661 # 2) use phasecache somewhere (e.g. commit)
1658 #
1662 #
1659 # then 2) will fail because the phasecache contains nodes that were
1663 # then 2) will fail because the phasecache contains nodes that were
1660 # removed. We can either remove phasecache from the filecache,
1664 # removed. We can either remove phasecache from the filecache,
1661 # causing it to reload next time it is accessed, or simply filter
1665 # causing it to reload next time it is accessed, or simply filter
1662 # the removed nodes now and write the updated cache.
1666 # the removed nodes now and write the updated cache.
1663 self._phasecache.filterunknown(self)
1667 self._phasecache.filterunknown(self)
1664 self._phasecache.write()
1668 self._phasecache.write()
1665
1669
1666 # update the 'served' branch cache to help read-only server processes
1670 # update the 'served' branch cache to help read-only server processes
1667 # Thanks to branchcache collaboration, this is done from the nearest
1671 # Thanks to branchcache collaboration, this is done from the nearest
1668 # filtered subset and is expected to be fast.
1672 # filtered subset and is expected to be fast.
1669 branchmap.updatecache(self.filtered('served'))
1673 branchmap.updatecache(self.filtered('served'))
1670
1674
1671 # Ensure the persistent tag cache is updated. Doing it now
1675 # Ensure the persistent tag cache is updated. Doing it now
1672 # means that the tag cache only has to worry about destroyed
1676 # means that the tag cache only has to worry about destroyed
1673 # heads immediately after a strip/rollback. That in turn
1677 # heads immediately after a strip/rollback. That in turn
1674 # guarantees that "cachetip == currenttip" (comparing both rev
1678 # guarantees that "cachetip == currenttip" (comparing both rev
1675 # and node) always means no nodes have been added or destroyed.
1679 # and node) always means no nodes have been added or destroyed.
1676
1680
1677 # XXX this is suboptimal when qrefresh'ing: we strip the current
1681 # XXX this is suboptimal when qrefresh'ing: we strip the current
1678 # head, refresh the tag cache, then immediately add a new head.
1682 # head, refresh the tag cache, then immediately add a new head.
1679 # But I think doing it this way is necessary for the "instant
1683 # But I think doing it this way is necessary for the "instant
1680 # tag cache retrieval" case to work.
1684 # tag cache retrieval" case to work.
1681 self.invalidate()
1685 self.invalidate()
1682
1686
1683 def walk(self, match, node=None):
1687 def walk(self, match, node=None):
1684 '''
1688 '''
1685 walk recursively through the directory tree or a given
1689 walk recursively through the directory tree or a given
1686 changeset, finding all files matched by the match
1690 changeset, finding all files matched by the match
1687 function
1691 function
1688 '''
1692 '''
1689 return self[node].walk(match)
1693 return self[node].walk(match)
1690
1694
1691 def status(self, node1='.', node2=None, match=None,
1695 def status(self, node1='.', node2=None, match=None,
1692 ignored=False, clean=False, unknown=False,
1696 ignored=False, clean=False, unknown=False,
1693 listsubrepos=False):
1697 listsubrepos=False):
1694 '''a convenience method that calls node1.status(node2)'''
1698 '''a convenience method that calls node1.status(node2)'''
1695 return self[node1].status(node2, match, ignored, clean, unknown,
1699 return self[node1].status(node2, match, ignored, clean, unknown,
1696 listsubrepos)
1700 listsubrepos)
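# Sketch: summarize working-directory changes against '.', assuming 'repo'
# is a localrepository instance:
#
#     st = repo.status(unknown=True)
#     touched = st.modified + st.added + st.removed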
1697
1701
1698 def heads(self, start=None):
1702 def heads(self, start=None):
1699 heads = self.changelog.heads(start)
1703 heads = self.changelog.heads(start)
1700 # sort the output in rev descending order
1704 # sort the output in rev descending order
1701 return sorted(heads, key=self.changelog.rev, reverse=True)
1705 return sorted(heads, key=self.changelog.rev, reverse=True)
1702
1706
1703 def branchheads(self, branch=None, start=None, closed=False):
1707 def branchheads(self, branch=None, start=None, closed=False):
1704 '''return a (possibly filtered) list of heads for the given branch
1708 '''return a (possibly filtered) list of heads for the given branch
1705
1709
1706 Heads are returned in topological order, from newest to oldest.
1710 Heads are returned in topological order, from newest to oldest.
1707 If branch is None, use the dirstate branch.
1711 If branch is None, use the dirstate branch.
1708 If start is not None, return only heads reachable from start.
1712 If start is not None, return only heads reachable from start.
1709 If closed is True, return heads that are marked as closed as well.
1713 If closed is True, return heads that are marked as closed as well.
1710 '''
1714 '''
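# Sketch: newest-first heads of the 'default' branch, including closed ones:
#
#     heads = repo.branchheads('default', closed=True)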
1711 if branch is None:
1715 if branch is None:
1712 branch = self[None].branch()
1716 branch = self[None].branch()
1713 branches = self.branchmap()
1717 branches = self.branchmap()
1714 if branch not in branches:
1718 if branch not in branches:
1715 return []
1719 return []
1716 # the cache returns heads ordered lowest to highest
1720 # the cache returns heads ordered lowest to highest
1717 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1721 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1718 if start is not None:
1722 if start is not None:
1719 # filter out the heads that cannot be reached from startrev
1723 # filter out the heads that cannot be reached from startrev
1720 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1724 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1721 bheads = [h for h in bheads if h in fbheads]
1725 bheads = [h for h in bheads if h in fbheads]
1722 return bheads
1726 return bheads
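# (editor's sketch, hedged) e.g. printing the open heads of the 'default'
# branch, newest first; 'repo' is assumed to be a localrepository instance.
#
#   from mercurial.node import short
#   for h in repo.branchheads('default'):
#       repo.ui.status('%s\n' % short(h))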
1723
1727
1724 def branches(self, nodes):
1728 def branches(self, nodes):
1725 if not nodes:
1729 if not nodes:
1726 nodes = [self.changelog.tip()]
1730 nodes = [self.changelog.tip()]
1727 b = []
1731 b = []
1728 for n in nodes:
1732 for n in nodes:
1729 t = n
1733 t = n
1730 while True:
1734 while True:
1731 p = self.changelog.parents(n)
1735 p = self.changelog.parents(n)
1732 if p[1] != nullid or p[0] == nullid:
1736 if p[1] != nullid or p[0] == nullid:
1733 b.append((t, n, p[0], p[1]))
1737 b.append((t, n, p[0], p[1]))
1734 break
1738 break
1735 n = p[0]
1739 n = p[0]
1736 return b
1740 return b
1737
1741
1738 def between(self, pairs):
1742 def between(self, pairs):
1739 r = []
1743 r = []
1740
1744
1741 for top, bottom in pairs:
1745 for top, bottom in pairs:
1742 n, l, i = top, [], 0
1746 n, l, i = top, [], 0
1743 f = 1
1747 f = 1
1744
1748
1745 while n != bottom and n != nullid:
1749 while n != bottom and n != nullid:
1746 p = self.changelog.parents(n)[0]
1750 p = self.changelog.parents(n)[0]
1747 if i == f:
1751 if i == f:
1748 l.append(n)
1752 l.append(n)
1749 f = f * 2
1753 f = f * 2
1750 n = p
1754 n = p
1751 i += 1
1755 i += 1
1752
1756
1753 r.append(l)
1757 r.append(l)
1754
1758
1755 return r
1759 return r
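# (editor's note, hedged) between() samples each top->bottom first-parent
# chain at exponentially growing distances from 'top' (1, 2, 4, 8, ...).
# Worked example: for a linear history 0..10 with top at rev 10 and bottom at
# rev 0, the sampled nodes are those at revs 9, 8, 6 and 2; the old discovery
# protocol appears to use this spacing to bisect for common ancestors without
# walking the whole chain.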
1756
1760
1757 def checkpush(self, pushop):
1761 def checkpush(self, pushop):
1758 """Extensions can override this function if additional checks have
1762 """Extensions can override this function if additional checks have
1759 to be performed before pushing, or call it if they override push
1763 to be performed before pushing, or call it if they override push
1760 command.
1764 command.
1761 """
1765 """
1762 pass
1766 pass
1763
1767
1764 @unfilteredpropertycache
1768 @unfilteredpropertycache
1765 def prepushoutgoinghooks(self):
1769 def prepushoutgoinghooks(self):
1766 """Return util.hooks consists of "(repo, remote, outgoing)"
1770 """Return util.hooks consists of "(repo, remote, outgoing)"
1767 functions, which are called before pushing changesets.
1771 functions, which are called before pushing changesets.
1768 """
1772 """
1769 return util.hooks()
1773 return util.hooks()
1770
1774
1771 def stream_in(self, remote, remotereqs):
1775 def stream_in(self, remote, remotereqs):
1772 # Save remote branchmap. We will use it later
1776 # Save remote branchmap. We will use it later
1773 # to speed up branchcache creation
1777 # to speed up branchcache creation
1774 rbranchmap = None
1778 rbranchmap = None
1775 if remote.capable("branchmap"):
1779 if remote.capable("branchmap"):
1776 rbranchmap = remote.branchmap()
1780 rbranchmap = remote.branchmap()
1777
1781
1778 fp = remote.stream_out()
1782 fp = remote.stream_out()
1779 l = fp.readline()
1783 l = fp.readline()
1780 try:
1784 try:
1781 resp = int(l)
1785 resp = int(l)
1782 except ValueError:
1786 except ValueError:
1783 raise error.ResponseError(
1787 raise error.ResponseError(
1784 _('unexpected response from remote server:'), l)
1788 _('unexpected response from remote server:'), l)
1785 if resp == 1:
1789 if resp == 1:
1786 raise util.Abort(_('operation forbidden by server'))
1790 raise util.Abort(_('operation forbidden by server'))
1787 elif resp == 2:
1791 elif resp == 2:
1788 raise util.Abort(_('locking the remote repository failed'))
1792 raise util.Abort(_('locking the remote repository failed'))
1789 elif resp != 0:
1793 elif resp != 0:
1790 raise util.Abort(_('the server sent an unknown error code'))
1794 raise util.Abort(_('the server sent an unknown error code'))
1791
1795
1792 self.applystreamclone(remotereqs, rbranchmap, fp)
1796 self.applystreamclone(remotereqs, rbranchmap, fp)
1793 return len(self.heads()) + 1
1797 return len(self.heads()) + 1
1794
1798
1795 def applystreamclone(self, remotereqs, remotebranchmap, fp):
1799 def applystreamclone(self, remotereqs, remotebranchmap, fp):
1796 """Apply stream clone data to this repository.
1800 """Apply stream clone data to this repository.
1797
1801
1798 "remotereqs" is a set of requirements to handle the incoming data.
1802 "remotereqs" is a set of requirements to handle the incoming data.
1799 "remotebranchmap" is the result of a branchmap lookup on the remote. It
1803 "remotebranchmap" is the result of a branchmap lookup on the remote. It
1800 can be None.
1804 can be None.
1801 "fp" is a file object containing the raw stream data, suitable for
1805 "fp" is a file object containing the raw stream data, suitable for
1802 feeding into exchange.consumestreamclone.
1806 feeding into exchange.consumestreamclone.
1803 """
1807 """
1804 lock = self.lock()
1808 lock = self.lock()
1805 try:
1809 try:
1806 exchange.consumestreamclone(self, fp)
1810 exchange.consumestreamclone(self, fp)
1807
1811
1808 # new requirements = old non-format requirements +
1812 # new requirements = old non-format requirements +
1809 # new format-related remote requirements
1813 # new format-related remote requirements
1810 # requirements from the streamed-in repository
1814 # requirements from the streamed-in repository
1811 self.requirements = remotereqs | (
1815 self.requirements = remotereqs | (
1812 self.requirements - self.supportedformats)
1816 self.requirements - self.supportedformats)
1813 self._applyopenerreqs()
1817 self._applyopenerreqs()
1814 self._writerequirements()
1818 self._writerequirements()
1815
1819
1816 if remotebranchmap:
1820 if remotebranchmap:
1817 rbheads = []
1821 rbheads = []
1818 closed = []
1822 closed = []
1819 for bheads in remotebranchmap.itervalues():
1823 for bheads in remotebranchmap.itervalues():
1820 rbheads.extend(bheads)
1824 rbheads.extend(bheads)
1821 for h in bheads:
1825 for h in bheads:
1822 r = self.changelog.rev(h)
1826 r = self.changelog.rev(h)
1823 b, c = self.changelog.branchinfo(r)
1827 b, c = self.changelog.branchinfo(r)
1824 if c:
1828 if c:
1825 closed.append(h)
1829 closed.append(h)
1826
1830
1827 if rbheads:
1831 if rbheads:
1828 rtiprev = max((int(self.changelog.rev(node))
1832 rtiprev = max((int(self.changelog.rev(node))
1829 for node in rbheads))
1833 for node in rbheads))
1830 cache = branchmap.branchcache(remotebranchmap,
1834 cache = branchmap.branchcache(remotebranchmap,
1831 self[rtiprev].node(),
1835 self[rtiprev].node(),
1832 rtiprev,
1836 rtiprev,
1833 closednodes=closed)
1837 closednodes=closed)
1834 # Try to stick it as low as possible
1838 # Try to stick it as low as possible
1835 # filters above 'served' are unlikely to be fetched from a clone
1839 # filters above 'served' are unlikely to be fetched from a clone
1836 for candidate in ('base', 'immutable', 'served'):
1840 for candidate in ('base', 'immutable', 'served'):
1837 rview = self.filtered(candidate)
1841 rview = self.filtered(candidate)
1838 if cache.validfor(rview):
1842 if cache.validfor(rview):
1839 self._branchcaches[candidate] = cache
1843 self._branchcaches[candidate] = cache
1840 cache.write(rview)
1844 cache.write(rview)
1841 break
1845 break
1842 self.invalidate()
1846 self.invalidate()
1843 finally:
1847 finally:
1844 lock.release()
1848 lock.release()
1845
1849
1846 def clone(self, remote, heads=[], stream=None):
1850 def clone(self, remote, heads=[], stream=None):
1847 '''clone remote repository.
1851 '''clone remote repository.
1848
1852
1849 keyword arguments:
1853 keyword arguments:
1850 heads: list of revs to clone (forces use of pull)
1854 heads: list of revs to clone (forces use of pull)
1851 stream: use streaming clone if possible'''
1855 stream: use streaming clone if possible'''
1852
1856
1853 # now, all clients that can request uncompressed clones can
1857 # now, all clients that can request uncompressed clones can
1854 # read repo formats supported by all servers that can serve
1858 # read repo formats supported by all servers that can serve
1855 # them.
1859 # them.
1856
1860
1857 # if revlog format changes, client will have to check version
1861 # if revlog format changes, client will have to check version
1858 # and format flags on "stream" capability, and use
1862 # and format flags on "stream" capability, and use
1859 # uncompressed only if compatible.
1863 # uncompressed only if compatible.
1860
1864
1861 if stream is None:
1865 if stream is None:
1862 # if the server explicitly prefers to stream (for fast LANs)
1866 # if the server explicitly prefers to stream (for fast LANs)
1863 stream = remote.capable('stream-preferred')
1867 stream = remote.capable('stream-preferred')
1864
1868
1865 if stream and not heads:
1869 if stream and not heads:
1866 # 'stream' means remote revlog format is revlogv1 only
1870 # 'stream' means remote revlog format is revlogv1 only
1867 if remote.capable('stream'):
1871 if remote.capable('stream'):
1868 self.stream_in(remote, set(('revlogv1',)))
1872 self.stream_in(remote, set(('revlogv1',)))
1869 else:
1873 else:
1870 # otherwise, 'streamreqs' contains the remote revlog format
1874 # otherwise, 'streamreqs' contains the remote revlog format
1871 streamreqs = remote.capable('streamreqs')
1875 streamreqs = remote.capable('streamreqs')
1872 if streamreqs:
1876 if streamreqs:
1873 streamreqs = set(streamreqs.split(','))
1877 streamreqs = set(streamreqs.split(','))
1874 # if we support it, stream in and adjust our requirements
1878 # if we support it, stream in and adjust our requirements
1875 if not streamreqs - self.supportedformats:
1879 if not streamreqs - self.supportedformats:
1876 self.stream_in(remote, streamreqs)
1880 self.stream_in(remote, streamreqs)
1877
1881
1878 # internal config: ui.quietbookmarkmove
1882 # internal config: ui.quietbookmarkmove
1879 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1883 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1880 try:
1884 try:
1881 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1885 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1882 ret = exchange.pull(self, remote, heads).cgresult
1886 ret = exchange.pull(self, remote, heads).cgresult
1883 finally:
1887 finally:
1884 self.ui.restoreconfig(quiet)
1888 self.ui.restoreconfig(quiet)
1885 return ret
1889 return ret
1886
1890
1887 def pushkey(self, namespace, key, old, new):
1891 def pushkey(self, namespace, key, old, new):
1888 try:
1892 try:
1889 tr = self.currenttransaction()
1893 tr = self.currenttransaction()
1890 hookargs = {}
1894 hookargs = {}
1891 if tr is not None:
1895 if tr is not None:
1892 hookargs.update(tr.hookargs)
1896 hookargs.update(tr.hookargs)
1893 pending = lambda: tr.writepending() and self.root or ""
1897 pending = lambda: tr.writepending() and self.root or ""
1894 hookargs['pending'] = pending
1898 hookargs['pending'] = pending
1895 hookargs['namespace'] = namespace
1899 hookargs['namespace'] = namespace
1896 hookargs['key'] = key
1900 hookargs['key'] = key
1897 hookargs['old'] = old
1901 hookargs['old'] = old
1898 hookargs['new'] = new
1902 hookargs['new'] = new
1899 self.hook('prepushkey', throw=True, **hookargs)
1903 self.hook('prepushkey', throw=True, **hookargs)
1900 except error.HookAbort as exc:
1904 except error.HookAbort as exc:
1901 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1905 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1902 if exc.hint:
1906 if exc.hint:
1903 self.ui.write_err(_("(%s)\n") % exc.hint)
1907 self.ui.write_err(_("(%s)\n") % exc.hint)
1904 return False
1908 return False
1905 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1909 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1906 ret = pushkey.push(self, namespace, key, old, new)
1910 ret = pushkey.push(self, namespace, key, old, new)
1907 def runhook():
1911 def runhook():
1908 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1912 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1909 ret=ret)
1913 ret=ret)
1910 self._afterlock(runhook)
1914 self._afterlock(runhook)
1911 return ret
1915 return ret
1912
1916
1913 def listkeys(self, namespace):
1917 def listkeys(self, namespace):
1914 self.hook('prelistkeys', throw=True, namespace=namespace)
1918 self.hook('prelistkeys', throw=True, namespace=namespace)
1915 self.ui.debug('listing keys for "%s"\n' % namespace)
1919 self.ui.debug('listing keys for "%s"\n' % namespace)
1916 values = pushkey.list(self, namespace)
1920 values = pushkey.list(self, namespace)
1917 self.hook('listkeys', namespace=namespace, values=values)
1921 self.hook('listkeys', namespace=namespace, values=values)
1918 return values
1922 return values
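# (editor's sketch, hedged) pushkey namespaces behave like simple key/value
# maps; for instance the 'bookmarks' namespace maps bookmark names to hex
# nodes. 'newhexnode' below is a hypothetical 40-character hex node string.
#
#   marks = repo.listkeys('bookmarks')
#   ok = repo.pushkey('bookmarks', 'mybook', marks.get('mybook', ''), newhexnode)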
1919
1923
1920 def debugwireargs(self, one, two, three=None, four=None, five=None):
1924 def debugwireargs(self, one, two, three=None, four=None, five=None):
1921 '''used to test argument passing over the wire'''
1925 '''used to test argument passing over the wire'''
1922 return "%s %s %s %s %s" % (one, two, three, four, five)
1926 return "%s %s %s %s %s" % (one, two, three, four, five)
1923
1927
1924 def savecommitmessage(self, text):
1928 def savecommitmessage(self, text):
1925 fp = self.vfs('last-message.txt', 'wb')
1929 fp = self.vfs('last-message.txt', 'wb')
1926 try:
1930 try:
1927 fp.write(text)
1931 fp.write(text)
1928 finally:
1932 finally:
1929 fp.close()
1933 fp.close()
1930 return self.pathto(fp.name[len(self.root) + 1:])
1934 return self.pathto(fp.name[len(self.root) + 1:])
1931
1935
1932 # used to avoid circular references so destructors work
1936 # used to avoid circular references so destructors work
1933 def aftertrans(files):
1937 def aftertrans(files):
1934 renamefiles = [tuple(t) for t in files]
1938 renamefiles = [tuple(t) for t in files]
1935 def a():
1939 def a():
1936 for vfs, src, dest in renamefiles:
1940 for vfs, src, dest in renamefiles:
1937 try:
1941 try:
1938 vfs.rename(src, dest)
1942 vfs.rename(src, dest)
1939 except OSError: # journal file does not yet exist
1943 except OSError: # journal file does not yet exist
1940 pass
1944 pass
1941 return a
1945 return a
1942
1946
1943 def undoname(fn):
1947 def undoname(fn):
1944 base, name = os.path.split(fn)
1948 base, name = os.path.split(fn)
1945 assert name.startswith('journal')
1949 assert name.startswith('journal')
1946 return os.path.join(base, name.replace('journal', 'undo', 1))
1950 return os.path.join(base, name.replace('journal', 'undo', 1))
1947
1951
1948 def instance(ui, path, create):
1952 def instance(ui, path, create):
1949 return localrepository(ui, util.urllocalpath(path), create)
1953 return localrepository(ui, util.urllocalpath(path), create)
1950
1954
1951 def islocal(path):
1955 def islocal(path):
1952 return True
1956 return True
@@ -1,1629 +1,1651 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 import collections
15 import collections
16 from node import bin, hex, nullid, nullrev
16 from node import bin, hex, nullid, nullrev
17 from i18n import _
17 from i18n import _
18 import ancestor, mdiff, parsers, error, util, templatefilters
18 import ancestor, mdiff, parsers, error, util, templatefilters
19 import struct, zlib, errno
19 import struct, zlib, errno
20
20
21 _pack = struct.pack
21 _pack = struct.pack
22 _unpack = struct.unpack
22 _unpack = struct.unpack
23 _compress = zlib.compress
23 _compress = zlib.compress
24 _decompress = zlib.decompress
24 _decompress = zlib.decompress
25 _sha = util.sha1
25 _sha = util.sha1
26
26
27 # revlog header flags
27 # revlog header flags
28 REVLOGV0 = 0
28 REVLOGV0 = 0
29 REVLOGNG = 1
29 REVLOGNG = 1
30 REVLOGNGINLINEDATA = (1 << 16)
30 REVLOGNGINLINEDATA = (1 << 16)
31 REVLOGGENERALDELTA = (1 << 17)
31 REVLOGGENERALDELTA = (1 << 17)
32 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
32 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
33 REVLOG_DEFAULT_FORMAT = REVLOGNG
33 REVLOG_DEFAULT_FORMAT = REVLOGNG
34 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
34 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
35 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
35 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
36
36
37 # revlog index flags
37 # revlog index flags
38 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
38 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
39 REVIDX_DEFAULT_FLAGS = 0
39 REVIDX_DEFAULT_FLAGS = 0
40 REVIDX_KNOWN_FLAGS = REVIDX_ISCENSORED
40 REVIDX_KNOWN_FLAGS = REVIDX_ISCENSORED
41
41
42 # max size of revlog with inline data
42 # max size of revlog with inline data
43 _maxinline = 131072
43 _maxinline = 131072
44 _chunksize = 1048576
44 _chunksize = 1048576
45
45
46 RevlogError = error.RevlogError
46 RevlogError = error.RevlogError
47 LookupError = error.LookupError
47 LookupError = error.LookupError
48 CensoredNodeError = error.CensoredNodeError
48 CensoredNodeError = error.CensoredNodeError
49
49
50 def getoffset(q):
50 def getoffset(q):
51 return int(q >> 16)
51 return int(q >> 16)
52
52
53 def gettype(q):
53 def gettype(q):
54 return int(q & 0xFFFF)
54 return int(q & 0xFFFF)
55
55
56 def offset_type(offset, type):
56 def offset_type(offset, type):
57 return long(long(offset) << 16 | type)
57 return long(long(offset) << 16 | type)
58
58
59 _nullhash = _sha(nullid)
59 _nullhash = _sha(nullid)
60
60
61 def hash(text, p1, p2):
61 def hash(text, p1, p2):
62 """generate a hash from the given text and its parent hashes
62 """generate a hash from the given text and its parent hashes
63
63
64 This hash combines both the current file contents and its history
64 This hash combines both the current file contents and its history
65 in a manner that makes it easy to distinguish nodes with the same
65 in a manner that makes it easy to distinguish nodes with the same
66 content in the revision graph.
66 content in the revision graph.
67 """
67 """
68 # As of now, if one of the parent nodes is null, p2 is null
68 # As of now, if one of the parent nodes is null, p2 is null
69 if p2 == nullid:
69 if p2 == nullid:
70 # deep copy of a hash is faster than creating one
70 # deep copy of a hash is faster than creating one
71 s = _nullhash.copy()
71 s = _nullhash.copy()
72 s.update(p1)
72 s.update(p1)
73 else:
73 else:
74 # none of the parent nodes are nullid
74 # none of the parent nodes are nullid
75 l = [p1, p2]
75 l = [p1, p2]
76 l.sort()
76 l.sort()
77 s = _sha(l[0])
77 s = _sha(l[0])
78 s.update(l[1])
78 s.update(l[1])
79 s.update(text)
79 s.update(text)
80 return s.digest()
80 return s.digest()
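# (editor's sketch, hedged) hash() above is equivalent to a single sha1 over a
# fixed concatenation of the parents and the text; standalone illustration
# only, not part of this changeset:
import hashlib

def _node_hash_sketch(text, p1, p2='\0' * 20):
    # mirrors hash(): nullid + p1 + text when p2 is null, else sorted parents
    if p2 == '\0' * 20:
        return hashlib.sha1('\0' * 20 + p1 + text).digest()
    a, b = sorted([p1, p2])
    return hashlib.sha1(a + b + text).digest()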
81
81
82 def decompress(bin):
82 def decompress(bin):
83 """ decompress the given input """
83 """ decompress the given input """
84 if not bin:
84 if not bin:
85 return bin
85 return bin
86 t = bin[0]
86 t = bin[0]
87 if t == '\0':
87 if t == '\0':
88 return bin
88 return bin
89 if t == 'x':
89 if t == 'x':
90 try:
90 try:
91 return _decompress(bin)
91 return _decompress(bin)
92 except zlib.error as e:
92 except zlib.error as e:
93 raise RevlogError(_("revlog decompress error: %s") % str(e))
93 raise RevlogError(_("revlog decompress error: %s") % str(e))
94 if t == 'u':
94 if t == 'u':
95 return bin[1:]
95 return bin[1:]
96 raise RevlogError(_("unknown compression type %r") % t)
96 raise RevlogError(_("unknown compression type %r") % t)
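# (editor's sketch, hedged) illustrating the chunk markers handled by
# decompress() above; zlib is already imported at the top of this module.
assert decompress('') == ''                               # empty chunk passes through
assert decompress('u' + 'plain text') == 'plain text'     # 'u' marks stored-as-is data
assert zlib.compress('some text')[0] == 'x'               # zlib streams begin with 'x'
assert decompress(zlib.compress('some text')) == 'some text'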
97
97
98 # index v0:
98 # index v0:
99 # 4 bytes: offset
99 # 4 bytes: offset
100 # 4 bytes: compressed length
100 # 4 bytes: compressed length
101 # 4 bytes: base rev
101 # 4 bytes: base rev
102 # 4 bytes: link rev
102 # 4 bytes: link rev
103 # 20 bytes: parent 1 nodeid
103 # 20 bytes: parent 1 nodeid
104 # 20 bytes: parent 2 nodeid
104 # 20 bytes: parent 2 nodeid
105 # 20 bytes: nodeid
105 # 20 bytes: nodeid
106 indexformatv0 = ">4l20s20s20s"
106 indexformatv0 = ">4l20s20s20s"
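# (editor's note, hedged) the v0 layout above packs into a fixed 76-byte
# record: four 4-byte integer fields plus three 20-byte nodeids.
assert struct.calcsize(indexformatv0) == 76   # struct is imported at the top of this module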
107
107
108 class revlogoldio(object):
108 class revlogoldio(object):
109 def __init__(self):
109 def __init__(self):
110 self.size = struct.calcsize(indexformatv0)
110 self.size = struct.calcsize(indexformatv0)
111
111
112 def parseindex(self, data, inline):
112 def parseindex(self, data, inline):
113 s = self.size
113 s = self.size
114 index = []
114 index = []
115 nodemap = {nullid: nullrev}
115 nodemap = {nullid: nullrev}
116 n = off = 0
116 n = off = 0
117 l = len(data)
117 l = len(data)
118 while off + s <= l:
118 while off + s <= l:
119 cur = data[off:off + s]
119 cur = data[off:off + s]
120 off += s
120 off += s
121 e = _unpack(indexformatv0, cur)
121 e = _unpack(indexformatv0, cur)
122 # transform to revlogv1 format
122 # transform to revlogv1 format
123 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
123 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
124 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
124 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
125 index.append(e2)
125 index.append(e2)
126 nodemap[e[6]] = n
126 nodemap[e[6]] = n
127 n += 1
127 n += 1
128
128
129 # add the magic null revision at -1
129 # add the magic null revision at -1
130 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
130 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
131
131
132 return index, nodemap, None
132 return index, nodemap, None
133
133
134 def packentry(self, entry, node, version, rev):
134 def packentry(self, entry, node, version, rev):
135 if gettype(entry[0]):
135 if gettype(entry[0]):
136 raise RevlogError(_("index entry flags need RevlogNG"))
136 raise RevlogError(_("index entry flags need RevlogNG"))
137 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
137 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
138 node(entry[5]), node(entry[6]), entry[7])
138 node(entry[5]), node(entry[6]), entry[7])
139 return _pack(indexformatv0, *e2)
139 return _pack(indexformatv0, *e2)
140
140
141 # index ng:
141 # index ng:
142 # 6 bytes: offset
142 # 6 bytes: offset
143 # 2 bytes: flags
143 # 2 bytes: flags
144 # 4 bytes: compressed length
144 # 4 bytes: compressed length
145 # 4 bytes: uncompressed length
145 # 4 bytes: uncompressed length
146 # 4 bytes: base rev
146 # 4 bytes: base rev
147 # 4 bytes: link rev
147 # 4 bytes: link rev
148 # 4 bytes: parent 1 rev
148 # 4 bytes: parent 1 rev
149 # 4 bytes: parent 2 rev
149 # 4 bytes: parent 2 rev
150 # 32 bytes: nodeid
150 # 32 bytes: nodeid
151 indexformatng = ">Qiiiiii20s12x"
151 indexformatng = ">Qiiiiii20s12x"
152 versionformat = ">I"
152 versionformat = ">I"
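# (editor's note, hedged) a RevlogNG entry packs into a fixed 64-byte record
# (8 + 6 * 4 + 20 + 12 padding bytes); per packentry() below, the first entry
# has its leading 4 bytes overwritten with the version/flags header.
assert struct.calcsize(indexformatng) == 64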
153
153
154 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
154 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
155 # signed integer)
155 # signed integer)
156 _maxentrysize = 0x7fffffff
156 _maxentrysize = 0x7fffffff
157
157
158 class revlogio(object):
158 class revlogio(object):
159 def __init__(self):
159 def __init__(self):
160 self.size = struct.calcsize(indexformatng)
160 self.size = struct.calcsize(indexformatng)
161
161
162 def parseindex(self, data, inline):
162 def parseindex(self, data, inline):
163 # call the C implementation to parse the index data
163 # call the C implementation to parse the index data
164 index, cache = parsers.parse_index2(data, inline)
164 index, cache = parsers.parse_index2(data, inline)
165 return index, getattr(index, 'nodemap', None), cache
165 return index, getattr(index, 'nodemap', None), cache
166
166
167 def packentry(self, entry, node, version, rev):
167 def packentry(self, entry, node, version, rev):
168 p = _pack(indexformatng, *entry)
168 p = _pack(indexformatng, *entry)
169 if rev == 0:
169 if rev == 0:
170 p = _pack(versionformat, version) + p[4:]
170 p = _pack(versionformat, version) + p[4:]
171 return p
171 return p
172
172
173 class revlog(object):
173 class revlog(object):
174 """
174 """
175 the underlying revision storage object
175 the underlying revision storage object
176
176
177 A revlog consists of two parts, an index and the revision data.
177 A revlog consists of two parts, an index and the revision data.
178
178
179 The index is a file with a fixed record size containing
179 The index is a file with a fixed record size containing
180 information on each revision, including its nodeid (hash), the
180 information on each revision, including its nodeid (hash), the
181 nodeids of its parents, the position and offset of its data within
181 nodeids of its parents, the position and offset of its data within
182 the data file, and the revision it's based on. Finally, each entry
182 the data file, and the revision it's based on. Finally, each entry
183 contains a linkrev entry that can serve as a pointer to external
183 contains a linkrev entry that can serve as a pointer to external
184 data.
184 data.
185
185
186 The revision data itself is a linear collection of data chunks.
186 The revision data itself is a linear collection of data chunks.
187 Each chunk represents a revision and is usually represented as a
187 Each chunk represents a revision and is usually represented as a
188 delta against the previous chunk. To bound lookup time, runs of
188 delta against the previous chunk. To bound lookup time, runs of
189 deltas are limited to about 2 times the length of the original
189 deltas are limited to about 2 times the length of the original
190 version data. This makes retrieval of a version proportional to
190 version data. This makes retrieval of a version proportional to
191 its size, or O(1) relative to the number of revisions.
191 its size, or O(1) relative to the number of revisions.
192
192
193 Both pieces of the revlog are written to in an append-only
193 Both pieces of the revlog are written to in an append-only
194 fashion, which means we never need to rewrite a file to insert or
194 fashion, which means we never need to rewrite a file to insert or
195 remove data, and can use some simple techniques to avoid the need
195 remove data, and can use some simple techniques to avoid the need
196 for locking while reading.
196 for locking while reading.
197 """
197 """
198 def __init__(self, opener, indexfile):
198 def __init__(self, opener, indexfile):
199 """
199 """
200 create a revlog object
200 create a revlog object
201
201
202 opener is a function that abstracts the file opening operation
202 opener is a function that abstracts the file opening operation
203 and can be used to implement COW semantics or the like.
203 and can be used to implement COW semantics or the like.
204 """
204 """
205 self.indexfile = indexfile
205 self.indexfile = indexfile
206 self.datafile = indexfile[:-2] + ".d"
206 self.datafile = indexfile[:-2] + ".d"
207 self.opener = opener
207 self.opener = opener
208 self._cache = None
208 self._cache = None
209 self._basecache = None
209 self._basecache = None
210 self._chunkcache = (0, '')
210 self._chunkcache = (0, '')
211 self._chunkcachesize = 65536
211 self._chunkcachesize = 65536
212 self._maxchainlen = None
212 self._maxchainlen = None
213 self._aggressivemergedeltas = False
213 self.index = []
214 self.index = []
214 self._pcache = {}
215 self._pcache = {}
215 self._nodecache = {nullid: nullrev}
216 self._nodecache = {nullid: nullrev}
216 self._nodepos = None
217 self._nodepos = None
217
218
218 v = REVLOG_DEFAULT_VERSION
219 v = REVLOG_DEFAULT_VERSION
219 opts = getattr(opener, 'options', None)
220 opts = getattr(opener, 'options', None)
220 if opts is not None:
221 if opts is not None:
221 if 'revlogv1' in opts:
222 if 'revlogv1' in opts:
222 if 'generaldelta' in opts:
223 if 'generaldelta' in opts:
223 v |= REVLOGGENERALDELTA
224 v |= REVLOGGENERALDELTA
224 else:
225 else:
225 v = 0
226 v = 0
226 if 'chunkcachesize' in opts:
227 if 'chunkcachesize' in opts:
227 self._chunkcachesize = opts['chunkcachesize']
228 self._chunkcachesize = opts['chunkcachesize']
228 if 'maxchainlen' in opts:
229 if 'maxchainlen' in opts:
229 self._maxchainlen = opts['maxchainlen']
230 self._maxchainlen = opts['maxchainlen']
231 if 'aggressivemergedeltas' in opts:
232 self._aggressivemergedeltas = opts['aggressivemergedeltas']
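# (editor's note, hedged) like 'chunkcachesize' and 'maxchainlen' above, the
# new 'aggressivemergedeltas' flag is expected to arrive via opener.options,
# filled in by localrepo from the user's configuration elsewhere in this
# changeset; it is consulted later when choosing delta bases for merge
# revisions.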
230
233
231 if self._chunkcachesize <= 0:
234 if self._chunkcachesize <= 0:
232 raise RevlogError(_('revlog chunk cache size %r is not greater '
235 raise RevlogError(_('revlog chunk cache size %r is not greater '
233 'than 0') % self._chunkcachesize)
236 'than 0') % self._chunkcachesize)
234 elif self._chunkcachesize & (self._chunkcachesize - 1):
237 elif self._chunkcachesize & (self._chunkcachesize - 1):
235 raise RevlogError(_('revlog chunk cache size %r is not a power '
238 raise RevlogError(_('revlog chunk cache size %r is not a power '
236 'of 2') % self._chunkcachesize)
239 'of 2') % self._chunkcachesize)
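# (editor's note, hedged) the bitwise test above relies on powers of two
# having a single set bit: 65536 & 65535 == 0, while 65537 & 65536 != 0,
# so every positive non-power-of-two (and only those) trips this branch.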
237
240
238 i = ''
241 i = ''
239 self._initempty = True
242 self._initempty = True
240 try:
243 try:
241 f = self.opener(self.indexfile)
244 f = self.opener(self.indexfile)
242 i = f.read()
245 i = f.read()
243 f.close()
246 f.close()
244 if len(i) > 0:
247 if len(i) > 0:
245 v = struct.unpack(versionformat, i[:4])[0]
248 v = struct.unpack(versionformat, i[:4])[0]
246 self._initempty = False
249 self._initempty = False
247 except IOError as inst:
250 except IOError as inst:
248 if inst.errno != errno.ENOENT:
251 if inst.errno != errno.ENOENT:
249 raise
252 raise
250
253
251 self.version = v
254 self.version = v
252 self._inline = v & REVLOGNGINLINEDATA
255 self._inline = v & REVLOGNGINLINEDATA
253 self._generaldelta = v & REVLOGGENERALDELTA
256 self._generaldelta = v & REVLOGGENERALDELTA
254 flags = v & ~0xFFFF
257 flags = v & ~0xFFFF
255 fmt = v & 0xFFFF
258 fmt = v & 0xFFFF
256 if fmt == REVLOGV0 and flags:
259 if fmt == REVLOGV0 and flags:
257 raise RevlogError(_("index %s unknown flags %#04x for format v0")
260 raise RevlogError(_("index %s unknown flags %#04x for format v0")
258 % (self.indexfile, flags >> 16))
261 % (self.indexfile, flags >> 16))
259 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
262 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
260 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
263 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
261 % (self.indexfile, flags >> 16))
264 % (self.indexfile, flags >> 16))
262 elif fmt > REVLOGNG:
265 elif fmt > REVLOGNG:
263 raise RevlogError(_("index %s unknown format %d")
266 raise RevlogError(_("index %s unknown format %d")
264 % (self.indexfile, fmt))
267 % (self.indexfile, fmt))
265
268
266 self._io = revlogio()
269 self._io = revlogio()
267 if self.version == REVLOGV0:
270 if self.version == REVLOGV0:
268 self._io = revlogoldio()
271 self._io = revlogoldio()
269 try:
272 try:
270 d = self._io.parseindex(i, self._inline)
273 d = self._io.parseindex(i, self._inline)
271 except (ValueError, IndexError):
274 except (ValueError, IndexError):
272 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
275 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
273 self.index, nodemap, self._chunkcache = d
276 self.index, nodemap, self._chunkcache = d
274 if nodemap is not None:
277 if nodemap is not None:
275 self.nodemap = self._nodecache = nodemap
278 self.nodemap = self._nodecache = nodemap
276 if not self._chunkcache:
279 if not self._chunkcache:
277 self._chunkclear()
280 self._chunkclear()
278 # revnum -> (chain-length, sum-delta-length)
281 # revnum -> (chain-length, sum-delta-length)
279 self._chaininfocache = {}
282 self._chaininfocache = {}
280
283
281 def tip(self):
284 def tip(self):
282 return self.node(len(self.index) - 2)
285 return self.node(len(self.index) - 2)
283 def __contains__(self, rev):
286 def __contains__(self, rev):
284 return 0 <= rev < len(self)
287 return 0 <= rev < len(self)
285 def __len__(self):
288 def __len__(self):
286 return len(self.index) - 1
289 return len(self.index) - 1
287 def __iter__(self):
290 def __iter__(self):
288 return iter(xrange(len(self)))
291 return iter(xrange(len(self)))
289 def revs(self, start=0, stop=None):
292 def revs(self, start=0, stop=None):
290 """iterate over all rev in this revlog (from start to stop)"""
293 """iterate over all rev in this revlog (from start to stop)"""
291 step = 1
294 step = 1
292 if stop is not None:
295 if stop is not None:
293 if start > stop:
296 if start > stop:
294 step = -1
297 step = -1
295 stop += step
298 stop += step
296 else:
299 else:
297 stop = len(self)
300 stop = len(self)
298 return xrange(start, stop, step)
301 return xrange(start, stop, step)
299
302
300 @util.propertycache
303 @util.propertycache
301 def nodemap(self):
304 def nodemap(self):
302 self.rev(self.node(0))
305 self.rev(self.node(0))
303 return self._nodecache
306 return self._nodecache
304
307
305 def hasnode(self, node):
308 def hasnode(self, node):
306 try:
309 try:
307 self.rev(node)
310 self.rev(node)
308 return True
311 return True
309 except KeyError:
312 except KeyError:
310 return False
313 return False
311
314
312 def clearcaches(self):
315 def clearcaches(self):
313 try:
316 try:
314 self._nodecache.clearcaches()
317 self._nodecache.clearcaches()
315 except AttributeError:
318 except AttributeError:
316 self._nodecache = {nullid: nullrev}
319 self._nodecache = {nullid: nullrev}
317 self._nodepos = None
320 self._nodepos = None
318
321
319 def rev(self, node):
322 def rev(self, node):
320 try:
323 try:
321 return self._nodecache[node]
324 return self._nodecache[node]
322 except TypeError:
325 except TypeError:
323 raise
326 raise
324 except RevlogError:
327 except RevlogError:
325 # parsers.c radix tree lookup failed
328 # parsers.c radix tree lookup failed
326 raise LookupError(node, self.indexfile, _('no node'))
329 raise LookupError(node, self.indexfile, _('no node'))
327 except KeyError:
330 except KeyError:
328 # pure python cache lookup failed
331 # pure python cache lookup failed
329 n = self._nodecache
332 n = self._nodecache
330 i = self.index
333 i = self.index
331 p = self._nodepos
334 p = self._nodepos
332 if p is None:
335 if p is None:
333 p = len(i) - 2
336 p = len(i) - 2
334 for r in xrange(p, -1, -1):
337 for r in xrange(p, -1, -1):
335 v = i[r][7]
338 v = i[r][7]
336 n[v] = r
339 n[v] = r
337 if v == node:
340 if v == node:
338 self._nodepos = r - 1
341 self._nodepos = r - 1
339 return r
342 return r
340 raise LookupError(node, self.indexfile, _('no node'))
343 raise LookupError(node, self.indexfile, _('no node'))
341
344
342 def node(self, rev):
345 def node(self, rev):
343 return self.index[rev][7]
346 return self.index[rev][7]
344 def linkrev(self, rev):
347 def linkrev(self, rev):
345 return self.index[rev][4]
348 return self.index[rev][4]
346 def parents(self, node):
349 def parents(self, node):
347 i = self.index
350 i = self.index
348 d = i[self.rev(node)]
351 d = i[self.rev(node)]
349 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
352 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
350 def parentrevs(self, rev):
353 def parentrevs(self, rev):
351 return self.index[rev][5:7]
354 return self.index[rev][5:7]
352 def start(self, rev):
355 def start(self, rev):
353 return int(self.index[rev][0] >> 16)
356 return int(self.index[rev][0] >> 16)
354 def end(self, rev):
357 def end(self, rev):
355 return self.start(rev) + self.length(rev)
358 return self.start(rev) + self.length(rev)
356 def length(self, rev):
359 def length(self, rev):
357 return self.index[rev][1]
360 return self.index[rev][1]
358 def chainbase(self, rev):
361 def chainbase(self, rev):
359 index = self.index
362 index = self.index
360 base = index[rev][3]
363 base = index[rev][3]
361 while base != rev:
364 while base != rev:
362 rev = base
365 rev = base
363 base = index[rev][3]
366 base = index[rev][3]
364 return base
367 return base
365 def chainlen(self, rev):
368 def chainlen(self, rev):
366 return self._chaininfo(rev)[0]
369 return self._chaininfo(rev)[0]
367
370
368 def _chaininfo(self, rev):
371 def _chaininfo(self, rev):
369 chaininfocache = self._chaininfocache
372 chaininfocache = self._chaininfocache
370 if rev in chaininfocache:
373 if rev in chaininfocache:
371 return chaininfocache[rev]
374 return chaininfocache[rev]
372 index = self.index
375 index = self.index
373 generaldelta = self._generaldelta
376 generaldelta = self._generaldelta
374 iterrev = rev
377 iterrev = rev
375 e = index[iterrev]
378 e = index[iterrev]
376 clen = 0
379 clen = 0
377 compresseddeltalen = 0
380 compresseddeltalen = 0
378 while iterrev != e[3]:
381 while iterrev != e[3]:
379 clen += 1
382 clen += 1
380 compresseddeltalen += e[1]
383 compresseddeltalen += e[1]
381 if generaldelta:
384 if generaldelta:
382 iterrev = e[3]
385 iterrev = e[3]
383 else:
386 else:
384 iterrev -= 1
387 iterrev -= 1
385 if iterrev in chaininfocache:
388 if iterrev in chaininfocache:
386 t = chaininfocache[iterrev]
389 t = chaininfocache[iterrev]
387 clen += t[0]
390 clen += t[0]
388 compresseddeltalen += t[1]
391 compresseddeltalen += t[1]
389 break
392 break
390 e = index[iterrev]
393 e = index[iterrev]
391 else:
394 else:
392 # Add text length of base since decompressing that also takes
395 # Add text length of base since decompressing that also takes
393 # work. For cache hits the length is already included.
396 # work. For cache hits the length is already included.
394 compresseddeltalen += e[1]
397 compresseddeltalen += e[1]
395 r = (clen, compresseddeltalen)
398 r = (clen, compresseddeltalen)
396 chaininfocache[rev] = r
399 chaininfocache[rev] = r
397 return r
400 return r
398
401
399 def flags(self, rev):
402 def flags(self, rev):
400 return self.index[rev][0] & 0xFFFF
403 return self.index[rev][0] & 0xFFFF
401 def rawsize(self, rev):
404 def rawsize(self, rev):
402 """return the length of the uncompressed text for a given revision"""
405 """return the length of the uncompressed text for a given revision"""
403 l = self.index[rev][2]
406 l = self.index[rev][2]
404 if l >= 0:
407 if l >= 0:
405 return l
408 return l
406
409
407 t = self.revision(self.node(rev))
410 t = self.revision(self.node(rev))
408 return len(t)
411 return len(t)
409 size = rawsize
412 size = rawsize
410
413
411 def ancestors(self, revs, stoprev=0, inclusive=False):
414 def ancestors(self, revs, stoprev=0, inclusive=False):
412 """Generate the ancestors of 'revs' in reverse topological order.
415 """Generate the ancestors of 'revs' in reverse topological order.
413 Does not generate revs lower than stoprev.
416 Does not generate revs lower than stoprev.
414
417
415 See the documentation for ancestor.lazyancestors for more details."""
418 See the documentation for ancestor.lazyancestors for more details."""
416
419
417 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
420 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
418 inclusive=inclusive)
421 inclusive=inclusive)
419
422
420 def descendants(self, revs):
423 def descendants(self, revs):
421 """Generate the descendants of 'revs' in revision order.
424 """Generate the descendants of 'revs' in revision order.
422
425
423 Yield a sequence of revision numbers starting with a child of
426 Yield a sequence of revision numbers starting with a child of
424 some rev in revs, i.e., each revision is *not* considered a
427 some rev in revs, i.e., each revision is *not* considered a
425 descendant of itself. Results are ordered by revision number (a
428 descendant of itself. Results are ordered by revision number (a
426 topological sort)."""
429 topological sort)."""
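# (editor's worked example, hedged) for a linear history 0-1-2-3,
# list(rl.descendants([1])) == [2, 3] and, per ancestors() above,
# list(rl.ancestors([2])) yields 1 then 0.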
427 first = min(revs)
430 first = min(revs)
428 if first == nullrev:
431 if first == nullrev:
429 for i in self:
432 for i in self:
430 yield i
433 yield i
431 return
434 return
432
435
433 seen = set(revs)
436 seen = set(revs)
434 for i in self.revs(start=first + 1):
437 for i in self.revs(start=first + 1):
435 for x in self.parentrevs(i):
438 for x in self.parentrevs(i):
436 if x != nullrev and x in seen:
439 if x != nullrev and x in seen:
437 seen.add(i)
440 seen.add(i)
438 yield i
441 yield i
439 break
442 break
440
443
441 def findcommonmissing(self, common=None, heads=None):
444 def findcommonmissing(self, common=None, heads=None):
442 """Return a tuple of the ancestors of common and the ancestors of heads
445 """Return a tuple of the ancestors of common and the ancestors of heads
443 that are not ancestors of common. In revset terminology, we return the
446 that are not ancestors of common. In revset terminology, we return the
444 tuple:
447 tuple:
445
448
446 ::common, (::heads) - (::common)
449 ::common, (::heads) - (::common)
447
450
448 The list is sorted by revision number, meaning it is
451 The list is sorted by revision number, meaning it is
449 topologically sorted.
452 topologically sorted.
450
453
451 'heads' and 'common' are both lists of node IDs. If heads is
454 'heads' and 'common' are both lists of node IDs. If heads is
452 not supplied, uses all of the revlog's heads. If common is not
455 not supplied, uses all of the revlog's heads. If common is not
453 supplied, uses nullid."""
456 supplied, uses nullid."""
454 if common is None:
457 if common is None:
455 common = [nullid]
458 common = [nullid]
456 if heads is None:
459 if heads is None:
457 heads = self.heads()
460 heads = self.heads()
458
461
459 common = [self.rev(n) for n in common]
462 common = [self.rev(n) for n in common]
460 heads = [self.rev(n) for n in heads]
463 heads = [self.rev(n) for n in heads]
461
464
462 # we want the ancestors, but inclusive
465 # we want the ancestors, but inclusive
463 class lazyset(object):
466 class lazyset(object):
464 def __init__(self, lazyvalues):
467 def __init__(self, lazyvalues):
465 self.addedvalues = set()
468 self.addedvalues = set()
466 self.lazyvalues = lazyvalues
469 self.lazyvalues = lazyvalues
467
470
468 def __contains__(self, value):
471 def __contains__(self, value):
469 return value in self.addedvalues or value in self.lazyvalues
472 return value in self.addedvalues or value in self.lazyvalues
470
473
471 def __iter__(self):
474 def __iter__(self):
472 added = self.addedvalues
475 added = self.addedvalues
473 for r in added:
476 for r in added:
474 yield r
477 yield r
475 for r in self.lazyvalues:
478 for r in self.lazyvalues:
476 if not r in added:
479 if not r in added:
477 yield r
480 yield r
478
481
479 def add(self, value):
482 def add(self, value):
480 self.addedvalues.add(value)
483 self.addedvalues.add(value)
481
484
482 def update(self, values):
485 def update(self, values):
483 self.addedvalues.update(values)
486 self.addedvalues.update(values)
484
487
485 has = lazyset(self.ancestors(common))
488 has = lazyset(self.ancestors(common))
486 has.add(nullrev)
489 has.add(nullrev)
487 has.update(common)
490 has.update(common)
488
491
489 # take all ancestors from heads that aren't in has
492 # take all ancestors from heads that aren't in has
490 missing = set()
493 missing = set()
491 visit = collections.deque(r for r in heads if r not in has)
494 visit = collections.deque(r for r in heads if r not in has)
492 while visit:
495 while visit:
493 r = visit.popleft()
496 r = visit.popleft()
494 if r in missing:
497 if r in missing:
495 continue
498 continue
496 else:
499 else:
497 missing.add(r)
500 missing.add(r)
498 for p in self.parentrevs(r):
501 for p in self.parentrevs(r):
499 if p not in has:
502 if p not in has:
500 visit.append(p)
503 visit.append(p)
501 missing = list(missing)
504 missing = list(missing)
502 missing.sort()
505 missing.sort()
503 return has, [self.node(r) for r in missing]
506 return has, [self.node(r) for r in missing]
504
507
505 def incrementalmissingrevs(self, common=None):
508 def incrementalmissingrevs(self, common=None):
506 """Return an object that can be used to incrementally compute the
509 """Return an object that can be used to incrementally compute the
507 revision numbers of the ancestors of arbitrary sets that are not
510 revision numbers of the ancestors of arbitrary sets that are not
508 ancestors of common. This is an ancestor.incrementalmissingancestors
511 ancestors of common. This is an ancestor.incrementalmissingancestors
509 object.
512 object.
510
513
511 'common' is a list of revision numbers. If common is not supplied, uses
514 'common' is a list of revision numbers. If common is not supplied, uses
512 nullrev.
515 nullrev.
513 """
516 """
514 if common is None:
517 if common is None:
515 common = [nullrev]
518 common = [nullrev]
516
519
517 return ancestor.incrementalmissingancestors(self.parentrevs, common)
520 return ancestor.incrementalmissingancestors(self.parentrevs, common)
518
521
519 def findmissingrevs(self, common=None, heads=None):
522 def findmissingrevs(self, common=None, heads=None):
520 """Return the revision numbers of the ancestors of heads that
523 """Return the revision numbers of the ancestors of heads that
521 are not ancestors of common.
524 are not ancestors of common.
522
525
523 More specifically, return a list of revision numbers corresponding to
526 More specifically, return a list of revision numbers corresponding to
524 nodes N such that every N satisfies the following constraints:
527 nodes N such that every N satisfies the following constraints:
525
528
526 1. N is an ancestor of some node in 'heads'
529 1. N is an ancestor of some node in 'heads'
527 2. N is not an ancestor of any node in 'common'
530 2. N is not an ancestor of any node in 'common'
528
531
529 The list is sorted by revision number, meaning it is
532 The list is sorted by revision number, meaning it is
530 topologically sorted.
533 topologically sorted.
531
534
532 'heads' and 'common' are both lists of revision numbers. If heads is
535 'heads' and 'common' are both lists of revision numbers. If heads is
533 not supplied, uses all of the revlog's heads. If common is not
536 not supplied, uses all of the revlog's heads. If common is not
534 supplied, uses nullrev."""
537 supplied, uses nullrev."""
535 if common is None:
538 if common is None:
536 common = [nullrev]
539 common = [nullrev]
537 if heads is None:
540 if heads is None:
538 heads = self.headrevs()
541 heads = self.headrevs()
539
542
540 inc = self.incrementalmissingrevs(common=common)
543 inc = self.incrementalmissingrevs(common=common)
541 return inc.missingancestors(heads)
544 return inc.missingancestors(heads)
542
545
543 def findmissing(self, common=None, heads=None):
546 def findmissing(self, common=None, heads=None):
544 """Return the ancestors of heads that are not ancestors of common.
547 """Return the ancestors of heads that are not ancestors of common.
545
548
546 More specifically, return a list of nodes N such that every N
549 More specifically, return a list of nodes N such that every N
547 satisfies the following constraints:
550 satisfies the following constraints:
548
551
549 1. N is an ancestor of some node in 'heads'
552 1. N is an ancestor of some node in 'heads'
550 2. N is not an ancestor of any node in 'common'
553 2. N is not an ancestor of any node in 'common'
551
554
552 The list is sorted by revision number, meaning it is
555 The list is sorted by revision number, meaning it is
553 topologically sorted.
556 topologically sorted.
554
557
555 'heads' and 'common' are both lists of node IDs. If heads is
558 'heads' and 'common' are both lists of node IDs. If heads is
556 not supplied, uses all of the revlog's heads. If common is not
559 not supplied, uses all of the revlog's heads. If common is not
557 supplied, uses nullid."""
560 supplied, uses nullid."""
558 if common is None:
561 if common is None:
559 common = [nullid]
562 common = [nullid]
560 if heads is None:
563 if heads is None:
561 heads = self.heads()
564 heads = self.heads()
562
565
563 common = [self.rev(n) for n in common]
566 common = [self.rev(n) for n in common]
564 heads = [self.rev(n) for n in heads]
567 heads = [self.rev(n) for n in heads]
565
568
566 inc = self.incrementalmissingrevs(common=common)
569 inc = self.incrementalmissingrevs(common=common)
567 return [self.node(r) for r in inc.missingancestors(heads)]
570 return [self.node(r) for r in inc.missingancestors(heads)]
568
571
569 def nodesbetween(self, roots=None, heads=None):
572 def nodesbetween(self, roots=None, heads=None):
570 """Return a topological path from 'roots' to 'heads'.
573 """Return a topological path from 'roots' to 'heads'.
571
574
572 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
575 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
573 topologically sorted list of all nodes N that satisfy both of
576 topologically sorted list of all nodes N that satisfy both of
574 these constraints:
577 these constraints:
575
578
576 1. N is a descendant of some node in 'roots'
579 1. N is a descendant of some node in 'roots'
577 2. N is an ancestor of some node in 'heads'
580 2. N is an ancestor of some node in 'heads'
578
581
579 Every node is considered to be both a descendant and an ancestor
582 Every node is considered to be both a descendant and an ancestor
580 of itself, so every reachable node in 'roots' and 'heads' will be
583 of itself, so every reachable node in 'roots' and 'heads' will be
581 included in 'nodes'.
584 included in 'nodes'.
582
585
583 'outroots' is the list of reachable nodes in 'roots', i.e., the
586 'outroots' is the list of reachable nodes in 'roots', i.e., the
584 subset of 'roots' that is returned in 'nodes'. Likewise,
587 subset of 'roots' that is returned in 'nodes'. Likewise,
585 'outheads' is the subset of 'heads' that is also in 'nodes'.
588 'outheads' is the subset of 'heads' that is also in 'nodes'.
586
589
587 'roots' and 'heads' are both lists of node IDs. If 'roots' is
590 'roots' and 'heads' are both lists of node IDs. If 'roots' is
588 unspecified, uses nullid as the only root. If 'heads' is
591 unspecified, uses nullid as the only root. If 'heads' is
589 unspecified, uses list of all of the revlog's heads."""
592 unspecified, uses list of all of the revlog's heads."""
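# (editor's worked example, hedged) for a linear history 0-1-2-3 with
# roots=[node(1)] and heads=[node(2)], the result is
# ([node(1), node(2)], [node(1)], [node(2)]).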
590 nonodes = ([], [], [])
593 nonodes = ([], [], [])
591 if roots is not None:
594 if roots is not None:
592 roots = list(roots)
595 roots = list(roots)
593 if not roots:
596 if not roots:
594 return nonodes
597 return nonodes
595 lowestrev = min([self.rev(n) for n in roots])
598 lowestrev = min([self.rev(n) for n in roots])
596 else:
599 else:
597 roots = [nullid] # Everybody's a descendant of nullid
600 roots = [nullid] # Everybody's a descendant of nullid
598 lowestrev = nullrev
601 lowestrev = nullrev
599 if (lowestrev == nullrev) and (heads is None):
602 if (lowestrev == nullrev) and (heads is None):
600 # We want _all_ the nodes!
603 # We want _all_ the nodes!
601 return ([self.node(r) for r in self], [nullid], list(self.heads()))
604 return ([self.node(r) for r in self], [nullid], list(self.heads()))
602 if heads is None:
605 if heads is None:
603 # All nodes are ancestors, so the latest ancestor is the last
606 # All nodes are ancestors, so the latest ancestor is the last
604 # node.
607 # node.
605 highestrev = len(self) - 1
608 highestrev = len(self) - 1
606 # Set ancestors to None to signal that every node is an ancestor.
609 # Set ancestors to None to signal that every node is an ancestor.
607 ancestors = None
610 ancestors = None
608 # Set heads to an empty dictionary for later discovery of heads
611 # Set heads to an empty dictionary for later discovery of heads
609 heads = {}
612 heads = {}
610 else:
613 else:
611 heads = list(heads)
614 heads = list(heads)
612 if not heads:
615 if not heads:
613 return nonodes
616 return nonodes
614 ancestors = set()
617 ancestors = set()
615 # Turn heads into a dictionary so we can remove 'fake' heads.
618 # Turn heads into a dictionary so we can remove 'fake' heads.
616 # Also, later we will be using it to filter out the heads we can't
619 # Also, later we will be using it to filter out the heads we can't
617 # find from roots.
620 # find from roots.
618 heads = dict.fromkeys(heads, False)
621 heads = dict.fromkeys(heads, False)
619 # Start at the top and keep marking parents until we're done.
622 # Start at the top and keep marking parents until we're done.
620 nodestotag = set(heads)
623 nodestotag = set(heads)
621 # Remember where the top was so we can use it as a limit later.
624 # Remember where the top was so we can use it as a limit later.
622 highestrev = max([self.rev(n) for n in nodestotag])
625 highestrev = max([self.rev(n) for n in nodestotag])
623 while nodestotag:
626 while nodestotag:
624 # grab a node to tag
627 # grab a node to tag
625 n = nodestotag.pop()
628 n = nodestotag.pop()
626 # Never tag nullid
629 # Never tag nullid
627 if n == nullid:
630 if n == nullid:
628 continue
631 continue
629 # A node's revision number represents its place in a
632 # A node's revision number represents its place in a
630 # topologically sorted list of nodes.
633 # topologically sorted list of nodes.
631 r = self.rev(n)
634 r = self.rev(n)
632 if r >= lowestrev:
635 if r >= lowestrev:
633 if n not in ancestors:
636 if n not in ancestors:
634 # If we are possibly a descendant of one of the roots
637 # If we are possibly a descendant of one of the roots
635 # and we haven't already been marked as an ancestor
638 # and we haven't already been marked as an ancestor
636 ancestors.add(n) # Mark as ancestor
639 ancestors.add(n) # Mark as ancestor
637 # Add non-nullid parents to list of nodes to tag.
640 # Add non-nullid parents to list of nodes to tag.
638 nodestotag.update([p for p in self.parents(n) if
641 nodestotag.update([p for p in self.parents(n) if
639 p != nullid])
642 p != nullid])
640 elif n in heads: # We've seen it before, is it a fake head?
643 elif n in heads: # We've seen it before, is it a fake head?
641 # So it is, real heads should not be the ancestors of
644 # So it is, real heads should not be the ancestors of
642 # any other heads.
645 # any other heads.
643 heads.pop(n)
646 heads.pop(n)
644 if not ancestors:
647 if not ancestors:
645 return nonodes
648 return nonodes
646 # Now that we have our set of ancestors, we want to remove any
649 # Now that we have our set of ancestors, we want to remove any
647 # roots that are not ancestors.
650 # roots that are not ancestors.
648
651
649 # If one of the roots was nullid, everything is included anyway.
652 # If one of the roots was nullid, everything is included anyway.
650 if lowestrev > nullrev:
653 if lowestrev > nullrev:
651 # But, since we weren't, let's recompute the lowest rev to not
654 # But, since we weren't, let's recompute the lowest rev to not
652 # include roots that aren't ancestors.
655 # include roots that aren't ancestors.
653
656
654 # Filter out roots that aren't ancestors of heads
657 # Filter out roots that aren't ancestors of heads
655 roots = [n for n in roots if n in ancestors]
658 roots = [n for n in roots if n in ancestors]
656 # Recompute the lowest revision
659 # Recompute the lowest revision
657 if roots:
660 if roots:
658 lowestrev = min([self.rev(n) for n in roots])
661 lowestrev = min([self.rev(n) for n in roots])
659 else:
662 else:
660 # No more roots? Return empty list
663 # No more roots? Return empty list
661 return nonodes
664 return nonodes
662 else:
665 else:
663 # We are descending from nullid, and don't need to care about
666 # We are descending from nullid, and don't need to care about
664 # any other roots.
667 # any other roots.
665 lowestrev = nullrev
668 lowestrev = nullrev
666 roots = [nullid]
669 roots = [nullid]
667 # Transform our roots list into a set.
670 # Transform our roots list into a set.
668 descendants = set(roots)
671 descendants = set(roots)
669 # Also, keep the original roots so we can filter out roots that aren't
672 # Also, keep the original roots so we can filter out roots that aren't
670 # 'real' roots (i.e. are descended from other roots).
673 # 'real' roots (i.e. are descended from other roots).
671 roots = descendants.copy()
674 roots = descendants.copy()
672 # Our topologically sorted list of output nodes.
675 # Our topologically sorted list of output nodes.
673 orderedout = []
676 orderedout = []
674 # Don't start at nullid since we don't want nullid in our output list,
677 # Don't start at nullid since we don't want nullid in our output list,
675 # and if nullid shows up in descendants, empty parents will look like
678 # and if nullid shows up in descendants, empty parents will look like
676 # they're descendants.
679 # they're descendants.
677 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
680 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
678 n = self.node(r)
681 n = self.node(r)
679 isdescendant = False
682 isdescendant = False
680 if lowestrev == nullrev: # Everybody is a descendant of nullid
683 if lowestrev == nullrev: # Everybody is a descendant of nullid
681 isdescendant = True
684 isdescendant = True
682 elif n in descendants:
685 elif n in descendants:
683 # n is already a descendant
686 # n is already a descendant
684 isdescendant = True
687 isdescendant = True
685 # This check only needs to be done here because all the roots
688 # This check only needs to be done here because all the roots
686 # will start being marked as descendants before the loop.
689 # will start being marked as descendants before the loop.
687 if n in roots:
690 if n in roots:
688 # If n was a root, check if it's a 'real' root.
691 # If n was a root, check if it's a 'real' root.
689 p = tuple(self.parents(n))
692 p = tuple(self.parents(n))
690 # If any of its parents are descendants, it's not a root.
693 # If any of its parents are descendants, it's not a root.
691 if (p[0] in descendants) or (p[1] in descendants):
694 if (p[0] in descendants) or (p[1] in descendants):
692 roots.remove(n)
695 roots.remove(n)
693 else:
696 else:
694 p = tuple(self.parents(n))
697 p = tuple(self.parents(n))
695 # A node is a descendant if either of its parents are
698 # A node is a descendant if either of its parents are
696 # descendants. (We seeded the descendants set with the roots
699 # descendants. (We seeded the descendants set with the roots
697 # up there, remember?)
700 # up there, remember?)
698 if (p[0] in descendants) or (p[1] in descendants):
701 if (p[0] in descendants) or (p[1] in descendants):
699 descendants.add(n)
702 descendants.add(n)
700 isdescendant = True
703 isdescendant = True
701 if isdescendant and ((ancestors is None) or (n in ancestors)):
704 if isdescendant and ((ancestors is None) or (n in ancestors)):
702 # Only include nodes that are both descendants and ancestors.
705 # Only include nodes that are both descendants and ancestors.
703 orderedout.append(n)
706 orderedout.append(n)
704 if (ancestors is not None) and (n in heads):
707 if (ancestors is not None) and (n in heads):
705 # We're trying to figure out which heads are reachable
708 # We're trying to figure out which heads are reachable
706 # from roots.
709 # from roots.
707 # Mark this head as having been reached
710 # Mark this head as having been reached
708 heads[n] = True
711 heads[n] = True
709 elif ancestors is None:
712 elif ancestors is None:
710 # Otherwise, we're trying to discover the heads.
713 # Otherwise, we're trying to discover the heads.
711 # Assume this is a head because if it isn't, the next step
714 # Assume this is a head because if it isn't, the next step
712 # will eventually remove it.
715 # will eventually remove it.
713 heads[n] = True
716 heads[n] = True
714 # But, obviously its parents aren't.
717 # But, obviously its parents aren't.
715 for p in self.parents(n):
718 for p in self.parents(n):
716 heads.pop(p, None)
719 heads.pop(p, None)
717 heads = [n for n, flag in heads.iteritems() if flag]
720 heads = [n for n, flag in heads.iteritems() if flag]
718 roots = list(roots)
721 roots = list(roots)
719 assert orderedout
722 assert orderedout
720 assert roots
723 assert roots
721 assert heads
724 assert heads
722 return (orderedout, roots, heads)
725 return (orderedout, roots, heads)
723
726
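The walk above produces the three return values of the surrounding routine: a topologically ordered list of the nodes lying between the given roots and heads, the roots that really are ancestors of those heads, and the heads actually reachable from the roots. As a hedged illustration only (the toy DAG, parent map, and helper name below are invented and not part of this file), the same filtering can be sketched over plain integer revs:

# Illustrative sketch, not part of revlog.py: the same roots/heads filtering
# over a toy DAG given as {rev: (p1, p2)}, with -1 standing in for nullrev.
def between(parents, roots, heads):
    # collect every ancestor of the heads (including the heads themselves)
    ancestors, stack = set(), list(heads)
    while stack:
        r = stack.pop()
        if r == -1 or r in ancestors:
            continue
        ancestors.add(r)
        stack.extend(parents[r])
    roots = [r for r in roots if r in ancestors]      # keep only real roots
    # rev numbers grow from parent to child, so sorted() is a topological order
    out, descendants = [], set(roots)
    for r in sorted(ancestors):
        if r in descendants or any(p in descendants for p in parents[r]):
            descendants.add(r)
            out.append(r)
    return out, roots, [h for h in heads if h in descendants]

dag = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (0, -1)}
print(between(dag, roots=[1], heads=[2, 3]))          # ([1, 2], [1], [2])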
724 def headrevs(self):
727 def headrevs(self):
725 try:
728 try:
726 return self.index.headrevs()
729 return self.index.headrevs()
727 except AttributeError:
730 except AttributeError:
728 return self._headrevs()
731 return self._headrevs()
729
732
730 def computephases(self, roots):
733 def computephases(self, roots):
731 return self.index.computephasesmapsets(roots)
734 return self.index.computephasesmapsets(roots)
732
735
733 def _headrevs(self):
736 def _headrevs(self):
734 count = len(self)
737 count = len(self)
735 if not count:
738 if not count:
736 return [nullrev]
739 return [nullrev]
737 # we won't iterate over filtered revs, so nobody is a head at start
740 # we won't iterate over filtered revs, so nobody is a head at start
738 ishead = [0] * (count + 1)
741 ishead = [0] * (count + 1)
739 index = self.index
742 index = self.index
740 for r in self:
743 for r in self:
741 ishead[r] = 1 # I may be a head
744 ishead[r] = 1 # I may be a head
742 e = index[r]
745 e = index[r]
743 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
746 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
744 return [r for r, val in enumerate(ishead) if val]
747 return [r for r, val in enumerate(ishead) if val]
745
748
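_headrevs above starts by assuming every unfiltered revision may be a head, then clears the flag on each revision's parents; whatever is still flagged at the end has no children. A minimal sketch of that single pass (the parent table below is invented for illustration):

# Illustrative sketch: heads are the revs nobody names as a parent.
def headrevs(parentrevs):
    ishead = [1] * len(parentrevs)      # every rev may be a head...
    for r, (p1, p2) in enumerate(parentrevs):
        for p in (p1, p2):
            if p != -1:                 # -1 plays the role of nullrev
                ishead[p] = 0           # ...but a rev with a child is not
    return [r for r, flag in enumerate(ishead) if flag]

# linear history 0-1-2 plus a branch 1-3: the heads are 2 and 3
print(headrevs([(-1, -1), (0, -1), (1, -1), (1, -1)]))  # [2, 3]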
746 def heads(self, start=None, stop=None):
749 def heads(self, start=None, stop=None):
747 """return the list of all nodes that have no children
750 """return the list of all nodes that have no children
748
751
749 if start is specified, only heads that are descendants of
752 if start is specified, only heads that are descendants of
750 start will be returned
753 start will be returned
751 if stop is specified, it will consider all the revs from stop
754 if stop is specified, it will consider all the revs from stop
752 as if they had no children
755 as if they had no children
753 """
756 """
754 if start is None and stop is None:
757 if start is None and stop is None:
755 if not len(self):
758 if not len(self):
756 return [nullid]
759 return [nullid]
757 return [self.node(r) for r in self.headrevs()]
760 return [self.node(r) for r in self.headrevs()]
758
761
759 if start is None:
762 if start is None:
760 start = nullid
763 start = nullid
761 if stop is None:
764 if stop is None:
762 stop = []
765 stop = []
763 stoprevs = set([self.rev(n) for n in stop])
766 stoprevs = set([self.rev(n) for n in stop])
764 startrev = self.rev(start)
767 startrev = self.rev(start)
765 reachable = set((startrev,))
768 reachable = set((startrev,))
766 heads = set((startrev,))
769 heads = set((startrev,))
767
770
768 parentrevs = self.parentrevs
771 parentrevs = self.parentrevs
769 for r in self.revs(start=startrev + 1):
772 for r in self.revs(start=startrev + 1):
770 for p in parentrevs(r):
773 for p in parentrevs(r):
771 if p in reachable:
774 if p in reachable:
772 if r not in stoprevs:
775 if r not in stoprevs:
773 reachable.add(r)
776 reachable.add(r)
774 heads.add(r)
777 heads.add(r)
775 if p in heads and p not in stoprevs:
778 if p in heads and p not in stoprevs:
776 heads.remove(p)
779 heads.remove(p)
777
780
778 return [self.node(r) for r in heads]
781 return [self.node(r) for r in heads]
779
782
780 def children(self, node):
783 def children(self, node):
781 """find the children of a given node"""
784 """find the children of a given node"""
782 c = []
785 c = []
783 p = self.rev(node)
786 p = self.rev(node)
784 for r in self.revs(start=p + 1):
787 for r in self.revs(start=p + 1):
785 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
788 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
786 if prevs:
789 if prevs:
787 for pr in prevs:
790 for pr in prevs:
788 if pr == p:
791 if pr == p:
789 c.append(self.node(r))
792 c.append(self.node(r))
790 elif p == nullrev:
793 elif p == nullrev:
791 c.append(self.node(r))
794 c.append(self.node(r))
792 return c
795 return c
793
796
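children() above finds the children of a node by scanning every later revision and checking whether the node's rev appears among its parents (with a special case so the null revision counts as everyone's parent). A toy version over a parent-rev table, with invented data and the nullrev case left out:

# Illustrative sketch, not the revlog API: children of rev p are the later
# revs that name p as one of their parents.
def children(parentrevs, p):
    return [r for r in range(p + 1, len(parentrevs)) if p in parentrevs[r]]

# history: 0 <- 1 <- 2, plus a branch 0 <- 3; the children of 0 are 1 and 3
print(children([(-1, -1), (0, -1), (1, -1), (0, -1)], 0))   # [1, 3]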
794 def descendant(self, start, end):
797 def descendant(self, start, end):
795 if start == nullrev:
798 if start == nullrev:
796 return True
799 return True
797 for i in self.descendants([start]):
800 for i in self.descendants([start]):
798 if i == end:
801 if i == end:
799 return True
802 return True
800 elif i > end:
803 elif i > end:
801 break
804 break
802 return False
805 return False
803
806
804 def commonancestorsheads(self, a, b):
807 def commonancestorsheads(self, a, b):
805 """calculate all the heads of the common ancestors of nodes a and b"""
808 """calculate all the heads of the common ancestors of nodes a and b"""
806 a, b = self.rev(a), self.rev(b)
809 a, b = self.rev(a), self.rev(b)
807 try:
810 try:
808 ancs = self.index.commonancestorsheads(a, b)
811 ancs = self.index.commonancestorsheads(a, b)
809 except (AttributeError, OverflowError): # C implementation failed
812 except (AttributeError, OverflowError): # C implementation failed
810 ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
813 ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
811 return map(self.node, ancs)
814 return map(self.node, ancs)
812
815
813 def isancestor(self, a, b):
816 def isancestor(self, a, b):
814 """return True if node a is an ancestor of node b
817 """return True if node a is an ancestor of node b
815
818
816 The implementation of this is trivial but the use of
819 The implementation of this is trivial but the use of
817 commonancestorsheads is not."""
820 commonancestorsheads is not."""
818 return a in self.commonancestorsheads(a, b)
821 return a in self.commonancestorsheads(a, b)
819
822
820 def ancestor(self, a, b):
823 def ancestor(self, a, b):
821 """calculate the "best" common ancestor of nodes a and b"""
824 """calculate the "best" common ancestor of nodes a and b"""
822
825
823 a, b = self.rev(a), self.rev(b)
826 a, b = self.rev(a), self.rev(b)
824 try:
827 try:
825 ancs = self.index.ancestors(a, b)
828 ancs = self.index.ancestors(a, b)
826 except (AttributeError, OverflowError):
829 except (AttributeError, OverflowError):
827 ancs = ancestor.ancestors(self.parentrevs, a, b)
830 ancs = ancestor.ancestors(self.parentrevs, a, b)
828 if ancs:
831 if ancs:
829 # choose a consistent winner when there's a tie
832 # choose a consistent winner when there's a tie
830 return min(map(self.node, ancs))
833 return min(map(self.node, ancs))
831 return nullid
834 return nullid
832
835
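When the ancestor computation returns more than one equally good candidate, ancestor() above takes min() over the candidate node ids so every caller, on every run, agrees on the same winner. A tiny illustration with made-up node values:

# Illustrative only: a deterministic tie-break is just min() over the candidates.
candidates = {b'\x9a' * 20, b'\x02' * 20, b'\x55' * 20}   # fake 20-byte nodes
print(min(candidates) == b'\x02' * 20)                    # True, whatever the set order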
833 def _match(self, id):
836 def _match(self, id):
834 if isinstance(id, int):
837 if isinstance(id, int):
835 # rev
838 # rev
836 return self.node(id)
839 return self.node(id)
837 if len(id) == 20:
840 if len(id) == 20:
838 # possibly a binary node
841 # possibly a binary node
839 # odds of a binary node being all hex in ASCII are 1 in 10**25
842 # odds of a binary node being all hex in ASCII are 1 in 10**25
840 try:
843 try:
841 node = id
844 node = id
842 self.rev(node) # quick search the index
845 self.rev(node) # quick search the index
843 return node
846 return node
844 except LookupError:
847 except LookupError:
845 pass # may be partial hex id
848 pass # may be partial hex id
846 try:
849 try:
847 # str(rev)
850 # str(rev)
848 rev = int(id)
851 rev = int(id)
849 if str(rev) != id:
852 if str(rev) != id:
850 raise ValueError
853 raise ValueError
851 if rev < 0:
854 if rev < 0:
852 rev = len(self) + rev
855 rev = len(self) + rev
853 if rev < 0 or rev >= len(self):
856 if rev < 0 or rev >= len(self):
854 raise ValueError
857 raise ValueError
855 return self.node(rev)
858 return self.node(rev)
856 except (ValueError, OverflowError):
859 except (ValueError, OverflowError):
857 pass
860 pass
858 if len(id) == 40:
861 if len(id) == 40:
859 try:
862 try:
860 # a full hex nodeid?
863 # a full hex nodeid?
861 node = bin(id)
864 node = bin(id)
862 self.rev(node)
865 self.rev(node)
863 return node
866 return node
864 except (TypeError, LookupError):
867 except (TypeError, LookupError):
865 pass
868 pass
866
869
867 def _partialmatch(self, id):
870 def _partialmatch(self, id):
868 try:
871 try:
869 n = self.index.partialmatch(id)
872 n = self.index.partialmatch(id)
870 if n and self.hasnode(n):
873 if n and self.hasnode(n):
871 return n
874 return n
872 return None
875 return None
873 except RevlogError:
876 except RevlogError:
874 # parsers.c radix tree lookup gave multiple matches
877 # parsers.c radix tree lookup gave multiple matches
875 # fall through to slow path that filters hidden revisions
878 # fall through to slow path that filters hidden revisions
876 pass
879 pass
877 except (AttributeError, ValueError):
880 except (AttributeError, ValueError):
878 # we are pure python, or key was too short to search radix tree
881 # we are pure python, or key was too short to search radix tree
879 pass
882 pass
880
883
881 if id in self._pcache:
884 if id in self._pcache:
882 return self._pcache[id]
885 return self._pcache[id]
883
886
884 if len(id) < 40:
887 if len(id) < 40:
885 try:
888 try:
886 # hex(node)[:...]
889 # hex(node)[:...]
887 l = len(id) // 2 # grab an even number of digits
890 l = len(id) // 2 # grab an even number of digits
888 prefix = bin(id[:l * 2])
891 prefix = bin(id[:l * 2])
889 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
892 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
890 nl = [n for n in nl if hex(n).startswith(id) and
893 nl = [n for n in nl if hex(n).startswith(id) and
891 self.hasnode(n)]
894 self.hasnode(n)]
892 if len(nl) > 0:
895 if len(nl) > 0:
893 if len(nl) == 1:
896 if len(nl) == 1:
894 self._pcache[id] = nl[0]
897 self._pcache[id] = nl[0]
895 return nl[0]
898 return nl[0]
896 raise LookupError(id, self.indexfile,
899 raise LookupError(id, self.indexfile,
897 _('ambiguous identifier'))
900 _('ambiguous identifier'))
898 return None
901 return None
899 except TypeError:
902 except TypeError:
900 pass
903 pass
901
904
902 def lookup(self, id):
905 def lookup(self, id):
903 """locate a node based on:
906 """locate a node based on:
904 - revision number or str(revision number)
907 - revision number or str(revision number)
905 - nodeid or subset of hex nodeid
908 - nodeid or subset of hex nodeid
906 """
909 """
907 n = self._match(id)
910 n = self._match(id)
908 if n is not None:
911 if n is not None:
909 return n
912 return n
910 n = self._partialmatch(id)
913 n = self._partialmatch(id)
911 if n:
914 if n:
912 return n
915 return n
913
916
914 raise LookupError(id, self.indexfile, _('no match found'))
917 raise LookupError(id, self.indexfile, _('no match found'))
915
918
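lookup() above accepts an integer rev, the decimal string of a rev, a 20-byte binary node, a full 40-character hex node, or an unambiguous hex prefix, trying _match() first and _partialmatch() second. A hedged sketch of that dispatch over a toy index (the table and helper below are invented, not the revlog API):

# Illustrative sketch: the id forms a lookup can accept.
hexnodes = ["9a3c2f", "02e1aa"]               # pretend (short) hex node ids by rev

def toy_lookup(id):
    if isinstance(id, int):                   # revision number
        return hexnodes[id]
    if id.isdigit():                          # str(revision number)
        return hexnodes[int(id)]
    matches = [n for n in hexnodes if n.startswith(id)]   # hex node prefix
    if len(matches) == 1:
        return matches[0]
    raise LookupError("ambiguous or unknown identifier: %r" % id)

print(toy_lookup(1), toy_lookup("0"), toy_lookup("02"))   # 02e1aa 9a3c2f 02e1aa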
916 def cmp(self, node, text):
919 def cmp(self, node, text):
917 """compare text with a given file revision
920 """compare text with a given file revision
918
921
919 returns True if text is different than what is stored.
922 returns True if text is different than what is stored.
920 """
923 """
921 p1, p2 = self.parents(node)
924 p1, p2 = self.parents(node)
922 return hash(text, p1, p2) != node
925 return hash(text, p1, p2) != node
923
926
924 def _addchunk(self, offset, data):
927 def _addchunk(self, offset, data):
925 o, d = self._chunkcache
928 o, d = self._chunkcache
926 # try to add to existing cache
929 # try to add to existing cache
927 if o + len(d) == offset and len(d) + len(data) < _chunksize:
930 if o + len(d) == offset and len(d) + len(data) < _chunksize:
928 self._chunkcache = o, d + data
931 self._chunkcache = o, d + data
929 else:
932 else:
930 self._chunkcache = offset, data
933 self._chunkcache = offset, data
931
934
932 def _loadchunk(self, offset, length):
935 def _loadchunk(self, offset, length):
933 if self._inline:
936 if self._inline:
934 df = self.opener(self.indexfile)
937 df = self.opener(self.indexfile)
935 else:
938 else:
936 df = self.opener(self.datafile)
939 df = self.opener(self.datafile)
937
940
938 # Cache data both forward and backward around the requested
941 # Cache data both forward and backward around the requested
939 # data, in a fixed size window. This helps speed up operations
942 # data, in a fixed size window. This helps speed up operations
940 # involving reading the revlog backwards.
943 # involving reading the revlog backwards.
941 cachesize = self._chunkcachesize
944 cachesize = self._chunkcachesize
942 realoffset = offset & ~(cachesize - 1)
945 realoffset = offset & ~(cachesize - 1)
943 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
946 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
944 - realoffset)
947 - realoffset)
945 df.seek(realoffset)
948 df.seek(realoffset)
946 d = df.read(reallength)
949 d = df.read(reallength)
947 df.close()
950 df.close()
948 self._addchunk(realoffset, d)
951 self._addchunk(realoffset, d)
949 if offset != realoffset or reallength != length:
952 if offset != realoffset or reallength != length:
950 return util.buffer(d, offset - realoffset, length)
953 return util.buffer(d, offset - realoffset, length)
951 return d
954 return d
952
955
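_loadchunk above rounds the requested span out to whole cache-sized windows so that neighbouring reads, forward or backward, are served from the chunk cache. A quick worked example of that alignment arithmetic (the numbers are purely illustrative; 65536 merely stands in for a typical _chunkcachesize):

# Illustrative arithmetic only: widening a read request to cache-aligned windows.
cachesize = 65536                     # must be a power of two for the mask trick
offset, length = 100000, 300
realoffset = offset & ~(cachesize - 1)                               # round down
reallength = ((offset + length + cachesize) & ~(cachesize - 1)) - realoffset
print(realoffset, reallength)         # 65536 65536: one full window covers the request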
953 def _getchunk(self, offset, length):
956 def _getchunk(self, offset, length):
954 o, d = self._chunkcache
957 o, d = self._chunkcache
955 l = len(d)
958 l = len(d)
956
959
957 # is it in the cache?
960 # is it in the cache?
958 cachestart = offset - o
961 cachestart = offset - o
959 cacheend = cachestart + length
962 cacheend = cachestart + length
960 if cachestart >= 0 and cacheend <= l:
963 if cachestart >= 0 and cacheend <= l:
961 if cachestart == 0 and cacheend == l:
964 if cachestart == 0 and cacheend == l:
962 return d # avoid a copy
965 return d # avoid a copy
963 return util.buffer(d, cachestart, cacheend - cachestart)
966 return util.buffer(d, cachestart, cacheend - cachestart)
964
967
965 return self._loadchunk(offset, length)
968 return self._loadchunk(offset, length)
966
969
967 def _chunkraw(self, startrev, endrev):
970 def _chunkraw(self, startrev, endrev):
968 start = self.start(startrev)
971 start = self.start(startrev)
969 end = self.end(endrev)
972 end = self.end(endrev)
970 if self._inline:
973 if self._inline:
971 start += (startrev + 1) * self._io.size
974 start += (startrev + 1) * self._io.size
972 end += (endrev + 1) * self._io.size
975 end += (endrev + 1) * self._io.size
973 length = end - start
976 length = end - start
974 return self._getchunk(start, length)
977 return self._getchunk(start, length)
975
978
976 def _chunk(self, rev):
979 def _chunk(self, rev):
977 return decompress(self._chunkraw(rev, rev))
980 return decompress(self._chunkraw(rev, rev))
978
981
979 def _chunks(self, revs):
982 def _chunks(self, revs):
980 '''faster version of [self._chunk(rev) for rev in revs]
983 '''faster version of [self._chunk(rev) for rev in revs]
981
984
982 Assumes that revs is in ascending order.'''
985 Assumes that revs is in ascending order.'''
983 if not revs:
986 if not revs:
984 return []
987 return []
985 start = self.start
988 start = self.start
986 length = self.length
989 length = self.length
987 inline = self._inline
990 inline = self._inline
988 iosize = self._io.size
991 iosize = self._io.size
989 buffer = util.buffer
992 buffer = util.buffer
990
993
991 l = []
994 l = []
992 ladd = l.append
995 ladd = l.append
993
996
994 # preload the cache
997 # preload the cache
995 try:
998 try:
996 while True:
999 while True:
997 # ensure that the cache doesn't change out from under us
1000 # ensure that the cache doesn't change out from under us
998 _cache = self._chunkcache
1001 _cache = self._chunkcache
999 self._chunkraw(revs[0], revs[-1])
1002 self._chunkraw(revs[0], revs[-1])
1000 if _cache == self._chunkcache:
1003 if _cache == self._chunkcache:
1001 break
1004 break
1002 offset, data = _cache
1005 offset, data = _cache
1003 except OverflowError:
1006 except OverflowError:
1004 # issue4215 - we can't cache a run of chunks greater than
1007 # issue4215 - we can't cache a run of chunks greater than
1005 # 2G on Windows
1008 # 2G on Windows
1006 return [self._chunk(rev) for rev in revs]
1009 return [self._chunk(rev) for rev in revs]
1007
1010
1008 for rev in revs:
1011 for rev in revs:
1009 chunkstart = start(rev)
1012 chunkstart = start(rev)
1010 if inline:
1013 if inline:
1011 chunkstart += (rev + 1) * iosize
1014 chunkstart += (rev + 1) * iosize
1012 chunklength = length(rev)
1015 chunklength = length(rev)
1013 ladd(decompress(buffer(data, chunkstart - offset, chunklength)))
1016 ladd(decompress(buffer(data, chunkstart - offset, chunklength)))
1014
1017
1015 return l
1018 return l
1016
1019
1017 def _chunkclear(self):
1020 def _chunkclear(self):
1018 self._chunkcache = (0, '')
1021 self._chunkcache = (0, '')
1019
1022
1020 def deltaparent(self, rev):
1023 def deltaparent(self, rev):
1021 """return deltaparent of the given revision"""
1024 """return deltaparent of the given revision"""
1022 base = self.index[rev][3]
1025 base = self.index[rev][3]
1023 if base == rev:
1026 if base == rev:
1024 return nullrev
1027 return nullrev
1025 elif self._generaldelta:
1028 elif self._generaldelta:
1026 return base
1029 return base
1027 else:
1030 else:
1028 return rev - 1
1031 return rev - 1
1029
1032
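deltaparent() above encodes the two storage layouts: with generaldelta the index entry's base field names the delta parent directly, while the classic layout always deltas against the previous revision, and a base equal to the rev itself means a full snapshot. A small sketch of the same decision (the simplified entry fields are for illustration only):

# Illustrative sketch: resolving the delta parent for one index entry.
nullrev = -1

def deltaparent(rev, base, generaldelta):
    if base == rev:                   # the entry stores a full snapshot
        return nullrev
    return base if generaldelta else rev - 1

print(deltaparent(7, 7, True))        # -1: full text, no delta parent
print(deltaparent(7, 3, True))        # 3: generaldelta points straight at its base
print(deltaparent(7, 3, False))       # 6: classic revlogs delta against the previous rev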
1030 def revdiff(self, rev1, rev2):
1033 def revdiff(self, rev1, rev2):
1031 """return or calculate a delta between two revisions"""
1034 """return or calculate a delta between two revisions"""
1032 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1035 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1033 return str(self._chunk(rev2))
1036 return str(self._chunk(rev2))
1034
1037
1035 return mdiff.textdiff(self.revision(rev1),
1038 return mdiff.textdiff(self.revision(rev1),
1036 self.revision(rev2))
1039 self.revision(rev2))
1037
1040
1038 def revision(self, nodeorrev):
1041 def revision(self, nodeorrev):
1039 """return an uncompressed revision of a given node or revision
1042 """return an uncompressed revision of a given node or revision
1040 number.
1043 number.
1041 """
1044 """
1042 if isinstance(nodeorrev, int):
1045 if isinstance(nodeorrev, int):
1043 rev = nodeorrev
1046 rev = nodeorrev
1044 node = self.node(rev)
1047 node = self.node(rev)
1045 else:
1048 else:
1046 node = nodeorrev
1049 node = nodeorrev
1047 rev = None
1050 rev = None
1048
1051
1049 _cache = self._cache # grab local copy of cache to avoid thread race
1052 _cache = self._cache # grab local copy of cache to avoid thread race
1050 cachedrev = None
1053 cachedrev = None
1051 if node == nullid:
1054 if node == nullid:
1052 return ""
1055 return ""
1053 if _cache:
1056 if _cache:
1054 if _cache[0] == node:
1057 if _cache[0] == node:
1055 return _cache[2]
1058 return _cache[2]
1056 cachedrev = _cache[1]
1059 cachedrev = _cache[1]
1057
1060
1058 # look up what we need to read
1061 # look up what we need to read
1059 text = None
1062 text = None
1060 if rev is None:
1063 if rev is None:
1061 rev = self.rev(node)
1064 rev = self.rev(node)
1062
1065
1063 # check rev flags
1066 # check rev flags
1064 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
1067 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
1065 raise RevlogError(_('incompatible revision flag %x') %
1068 raise RevlogError(_('incompatible revision flag %x') %
1066 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
1069 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
1067
1070
1068 # build delta chain
1071 # build delta chain
1069 chain = []
1072 chain = []
1070 index = self.index # for performance
1073 index = self.index # for performance
1071 generaldelta = self._generaldelta
1074 generaldelta = self._generaldelta
1072 iterrev = rev
1075 iterrev = rev
1073 e = index[iterrev]
1076 e = index[iterrev]
1074 while iterrev != e[3] and iterrev != cachedrev:
1077 while iterrev != e[3] and iterrev != cachedrev:
1075 chain.append(iterrev)
1078 chain.append(iterrev)
1076 if generaldelta:
1079 if generaldelta:
1077 iterrev = e[3]
1080 iterrev = e[3]
1078 else:
1081 else:
1079 iterrev -= 1
1082 iterrev -= 1
1080 e = index[iterrev]
1083 e = index[iterrev]
1081
1084
1082 if iterrev == cachedrev:
1085 if iterrev == cachedrev:
1083 # cache hit
1086 # cache hit
1084 text = _cache[2]
1087 text = _cache[2]
1085 else:
1088 else:
1086 chain.append(iterrev)
1089 chain.append(iterrev)
1087 chain.reverse()
1090 chain.reverse()
1088
1091
1089 # drop cache to save memory
1092 # drop cache to save memory
1090 self._cache = None
1093 self._cache = None
1091
1094
1092 bins = self._chunks(chain)
1095 bins = self._chunks(chain)
1093 if text is None:
1096 if text is None:
1094 text = str(bins[0])
1097 text = str(bins[0])
1095 bins = bins[1:]
1098 bins = bins[1:]
1096
1099
1097 text = mdiff.patches(text, bins)
1100 text = mdiff.patches(text, bins)
1098
1101
1099 text = self._checkhash(text, node, rev)
1102 text = self._checkhash(text, node, rev)
1100
1103
1101 self._cache = (node, rev, text)
1104 self._cache = (node, rev, text)
1102 return text
1105 return text
1103
1106
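revision() above walks the index backwards to collect the delta chain down to a full snapshot (or a cached revision), then replays that chain forward with mdiff.patches. The shape of the replay, sketched with throwaway string "patches" rather than real mdiff deltas:

# Illustrative sketch: rebuild a text by replaying a delta chain from its base.
# Each "delta" here is just a callable; in the revlog they are binary mdiff patches.
chain = [
    lambda text: text + " world",      # delta stored for rev 1
    lambda text: text + "!",           # delta stored for rev 2
]
text = "hello"                         # full snapshot at the chain base (rev 0)
for apply_patch in chain:              # the chain is ordered base -> wanted rev
    text = apply_patch(text)
print(text)                            # hello world!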
1104 def hash(self, text, p1, p2):
1107 def hash(self, text, p1, p2):
1105 """Compute a node hash.
1108 """Compute a node hash.
1106
1109
1107 Available as a function so that subclasses can replace the hash
1110 Available as a function so that subclasses can replace the hash
1108 as needed.
1111 as needed.
1109 """
1112 """
1110 return hash(text, p1, p2)
1113 return hash(text, p1, p2)
1111
1114
1112 def _checkhash(self, text, node, rev):
1115 def _checkhash(self, text, node, rev):
1113 p1, p2 = self.parents(node)
1116 p1, p2 = self.parents(node)
1114 self.checkhash(text, p1, p2, node, rev)
1117 self.checkhash(text, p1, p2, node, rev)
1115 return text
1118 return text
1116
1119
1117 def checkhash(self, text, p1, p2, node, rev=None):
1120 def checkhash(self, text, p1, p2, node, rev=None):
1118 if node != self.hash(text, p1, p2):
1121 if node != self.hash(text, p1, p2):
1119 revornode = rev
1122 revornode = rev
1120 if revornode is None:
1123 if revornode is None:
1121 revornode = templatefilters.short(hex(node))
1124 revornode = templatefilters.short(hex(node))
1122 raise RevlogError(_("integrity check failed on %s:%s")
1125 raise RevlogError(_("integrity check failed on %s:%s")
1123 % (self.indexfile, revornode))
1126 % (self.indexfile, revornode))
1124
1127
1125 def checkinlinesize(self, tr, fp=None):
1128 def checkinlinesize(self, tr, fp=None):
1126 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1129 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1127 return
1130 return
1128
1131
1129 trinfo = tr.find(self.indexfile)
1132 trinfo = tr.find(self.indexfile)
1130 if trinfo is None:
1133 if trinfo is None:
1131 raise RevlogError(_("%s not found in the transaction")
1134 raise RevlogError(_("%s not found in the transaction")
1132 % self.indexfile)
1135 % self.indexfile)
1133
1136
1134 trindex = trinfo[2]
1137 trindex = trinfo[2]
1135 if trindex is not None:
1138 if trindex is not None:
1136 dataoff = self.start(trindex)
1139 dataoff = self.start(trindex)
1137 else:
1140 else:
1138 # revlog was stripped at start of transaction, use all leftover data
1141 # revlog was stripped at start of transaction, use all leftover data
1139 trindex = len(self) - 1
1142 trindex = len(self) - 1
1140 dataoff = self.end(-2)
1143 dataoff = self.end(-2)
1141
1144
1142 tr.add(self.datafile, dataoff)
1145 tr.add(self.datafile, dataoff)
1143
1146
1144 if fp:
1147 if fp:
1145 fp.flush()
1148 fp.flush()
1146 fp.close()
1149 fp.close()
1147
1150
1148 df = self.opener(self.datafile, 'w')
1151 df = self.opener(self.datafile, 'w')
1149 try:
1152 try:
1150 for r in self:
1153 for r in self:
1151 df.write(self._chunkraw(r, r))
1154 df.write(self._chunkraw(r, r))
1152 finally:
1155 finally:
1153 df.close()
1156 df.close()
1154
1157
1155 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1158 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1156 self.version &= ~(REVLOGNGINLINEDATA)
1159 self.version &= ~(REVLOGNGINLINEDATA)
1157 self._inline = False
1160 self._inline = False
1158 for i in self:
1161 for i in self:
1159 e = self._io.packentry(self.index[i], self.node, self.version, i)
1162 e = self._io.packentry(self.index[i], self.node, self.version, i)
1160 fp.write(e)
1163 fp.write(e)
1161
1164
1162 # if we don't call close, the temp file will never replace the
1165 # if we don't call close, the temp file will never replace the
1163 # real index
1166 # real index
1164 fp.close()
1167 fp.close()
1165
1168
1166 tr.replace(self.indexfile, trindex * self._io.size)
1169 tr.replace(self.indexfile, trindex * self._io.size)
1167 self._chunkclear()
1170 self._chunkclear()
1168
1171
1169 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1172 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1170 node=None):
1173 node=None):
1171 """add a revision to the log
1174 """add a revision to the log
1172
1175
1173 text - the revision data to add
1176 text - the revision data to add
1174 transaction - the transaction object used for rollback
1177 transaction - the transaction object used for rollback
1175 link - the linkrev data to add
1178 link - the linkrev data to add
1176 p1, p2 - the parent nodeids of the revision
1179 p1, p2 - the parent nodeids of the revision
1177 cachedelta - an optional precomputed delta
1180 cachedelta - an optional precomputed delta
1178 node - nodeid of revision; typically node is not specified, and it is
1181 node - nodeid of revision; typically node is not specified, and it is
1179 computed by default as hash(text, p1, p2), however subclasses might
1182 computed by default as hash(text, p1, p2), however subclasses might
1180 use a different hashing method (and override checkhash() in such a case)
1183 use a different hashing method (and override checkhash() in such a case)
1181 """
1184 """
1182 if link == nullrev:
1185 if link == nullrev:
1183 raise RevlogError(_("attempted to add linkrev -1 to %s")
1186 raise RevlogError(_("attempted to add linkrev -1 to %s")
1184 % self.indexfile)
1187 % self.indexfile)
1185
1188
1186 if len(text) > _maxentrysize:
1189 if len(text) > _maxentrysize:
1187 raise RevlogError(
1190 raise RevlogError(
1188 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1191 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1189 % (self.indexfile, len(text)))
1192 % (self.indexfile, len(text)))
1190
1193
1191 node = node or self.hash(text, p1, p2)
1194 node = node or self.hash(text, p1, p2)
1192 if node in self.nodemap:
1195 if node in self.nodemap:
1193 return node
1196 return node
1194
1197
1195 dfh = None
1198 dfh = None
1196 if not self._inline:
1199 if not self._inline:
1197 dfh = self.opener(self.datafile, "a")
1200 dfh = self.opener(self.datafile, "a")
1198 ifh = self.opener(self.indexfile, "a+")
1201 ifh = self.opener(self.indexfile, "a+")
1199 try:
1202 try:
1200 return self._addrevision(node, text, transaction, link, p1, p2,
1203 return self._addrevision(node, text, transaction, link, p1, p2,
1201 REVIDX_DEFAULT_FLAGS, cachedelta, ifh, dfh)
1204 REVIDX_DEFAULT_FLAGS, cachedelta, ifh, dfh)
1202 finally:
1205 finally:
1203 if dfh:
1206 if dfh:
1204 dfh.close()
1207 dfh.close()
1205 ifh.close()
1208 ifh.close()
1206
1209
1207 def compress(self, text):
1210 def compress(self, text):
1208 """ generate a possibly-compressed representation of text """
1211 """ generate a possibly-compressed representation of text """
1209 if not text:
1212 if not text:
1210 return ("", text)
1213 return ("", text)
1211 l = len(text)
1214 l = len(text)
1212 bin = None
1215 bin = None
1213 if l < 44:
1216 if l < 44:
1214 pass
1217 pass
1215 elif l > 1000000:
1218 elif l > 1000000:
1216 # zlib makes an internal copy, thus doubling memory usage for
1219 # zlib makes an internal copy, thus doubling memory usage for
1217 # large files, so let's do this in pieces
1220 # large files, so let's do this in pieces
1218 z = zlib.compressobj()
1221 z = zlib.compressobj()
1219 p = []
1222 p = []
1220 pos = 0
1223 pos = 0
1221 while pos < l:
1224 while pos < l:
1222 pos2 = pos + 2**20
1225 pos2 = pos + 2**20
1223 p.append(z.compress(text[pos:pos2]))
1226 p.append(z.compress(text[pos:pos2]))
1224 pos = pos2
1227 pos = pos2
1225 p.append(z.flush())
1228 p.append(z.flush())
1226 if sum(map(len, p)) < l:
1229 if sum(map(len, p)) < l:
1227 bin = "".join(p)
1230 bin = "".join(p)
1228 else:
1231 else:
1229 bin = _compress(text)
1232 bin = _compress(text)
1230 if bin is None or len(bin) > l:
1233 if bin is None or len(bin) > l:
1231 if text[0] == '\0':
1234 if text[0] == '\0':
1232 return ("", text)
1235 return ("", text)
1233 return ('u', text)
1236 return ('u', text)
1234 return ("", bin)
1237 return ("", bin)
1235
1238
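compress() above returns a (header, payload) pair: an empty header marks zlib-compressed data (or raw text that already starts with a NUL byte), 'u' marks text stored uncompressed because compression did not pay off, and texts shorter than 44 bytes are never compressed at all. A small self-contained sketch of that decision using zlib directly (toy_compress is not the revlog function):

# Illustrative sketch of the store-compressed-or-literal decision.
import zlib

def toy_compress(text):
    if not text:
        return ("", text)
    if len(text) >= 44:                      # very small texts never win with zlib
        bin = zlib.compress(text)
        if len(bin) < len(text):
            return ("", bin)                 # compressed payload, empty header
    if text[:1] == b"\0":
        return ("", text)                    # NUL-prefixed text is self-marking
    return ("u", text)                       # 'u' marks literal, uncompressed text

print(toy_compress(b"x" * 1000)[0] or "<zlib>")   # compresses well
print(toy_compress(b"abc")[0])                    # 'u': stored literally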
1236 def _isgooddelta(self, d, textlen):
1239 def _isgooddelta(self, d, textlen):
1237 """Returns True if the given delta is good. Good means that it is within
1240 """Returns True if the given delta is good. Good means that it is within
1238 the disk span, disk size, and chain length bounds that we know to be
1241 the disk span, disk size, and chain length bounds that we know to be
1239 performant."""
1242 performant."""
1240 if d is None:
1243 if d is None:
1241 return False
1244 return False
1242
1245
1243 # - 'dist' is the distance from the base revision -- bounding it limits
1246 # - 'dist' is the distance from the base revision -- bounding it limits
1244 # the amount of I/O we need to do.
1247 # the amount of I/O we need to do.
1245 # - 'compresseddeltalen' is the sum of the total size of deltas we need
1248 # - 'compresseddeltalen' is the sum of the total size of deltas we need
1246 # to apply -- bounding it limits the amount of CPU we consume.
1249 # to apply -- bounding it limits the amount of CPU we consume.
1247 dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
1250 dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
1248 if (dist > textlen * 4 or l > textlen or
1251 if (dist > textlen * 4 or l > textlen or
1249 compresseddeltalen > textlen * 2 or
1252 compresseddeltalen > textlen * 2 or
1250 (self._maxchainlen and chainlen > self._maxchainlen)):
1253 (self._maxchainlen and chainlen > self._maxchainlen)):
1251 return False
1254 return False
1252
1255
1253 return True
1256 return True
1254
1257
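_isgooddelta above rejects a candidate when the span of revlog data needed to rebuild the text ('dist') exceeds four times the new text, when the delta alone is larger than the text, when the accumulated compressed deltas in the chain exceed twice the text, or when the chain would grow past the configured maximum. The same bounds applied to some made-up numbers:

# Illustrative check mirroring the bounds _isgooddelta() enforces.
def isgooddelta(dist, deltalen, chaindeltalen, chainlen, textlen, maxchainlen=None):
    if dist > textlen * 4:            return False   # too much data to read back
    if deltalen > textlen:            return False   # delta bigger than the text itself
    if chaindeltalen > textlen * 2:   return False   # chain costs too much CPU to replay
    if maxchainlen and chainlen > maxchainlen:
        return False
    return True

print(isgooddelta(dist=3000, deltalen=200, chaindeltalen=900, chainlen=5, textlen=1000))   # True
print(isgooddelta(dist=9000, deltalen=200, chaindeltalen=900, chainlen=5, textlen=1000))   # False: dist > 4 * textlen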
1255 def _addrevision(self, node, text, transaction, link, p1, p2, flags,
1258 def _addrevision(self, node, text, transaction, link, p1, p2, flags,
1256 cachedelta, ifh, dfh):
1259 cachedelta, ifh, dfh):
1257 """internal function to add revisions to the log
1260 """internal function to add revisions to the log
1258
1261
1259 see addrevision for argument descriptions.
1262 see addrevision for argument descriptions.
1260 invariants:
1263 invariants:
1261 - text is optional (can be None); if not set, cachedelta must be set.
1264 - text is optional (can be None); if not set, cachedelta must be set.
1262 if both are set, they must correspond to each other.
1265 if both are set, they must correspond to each other.
1263 """
1266 """
1264 btext = [text]
1267 btext = [text]
1265 def buildtext():
1268 def buildtext():
1266 if btext[0] is not None:
1269 if btext[0] is not None:
1267 return btext[0]
1270 return btext[0]
1268 # flush any pending writes here so we can read it in revision
1271 # flush any pending writes here so we can read it in revision
1269 if dfh:
1272 if dfh:
1270 dfh.flush()
1273 dfh.flush()
1271 ifh.flush()
1274 ifh.flush()
1272 baserev = cachedelta[0]
1275 baserev = cachedelta[0]
1273 delta = cachedelta[1]
1276 delta = cachedelta[1]
1274 # special case deltas which replace entire base; no need to decode
1277 # special case deltas which replace entire base; no need to decode
1275 # base revision. this neatly avoids censored bases, which throw when
1278 # base revision. this neatly avoids censored bases, which throw when
1276 # they're decoded.
1279 # they're decoded.
1277 hlen = struct.calcsize(">lll")
1280 hlen = struct.calcsize(">lll")
1278 if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
1281 if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
1279 len(delta) - hlen):
1282 len(delta) - hlen):
1280 btext[0] = delta[hlen:]
1283 btext[0] = delta[hlen:]
1281 else:
1284 else:
1282 basetext = self.revision(self.node(baserev))
1285 basetext = self.revision(self.node(baserev))
1283 btext[0] = mdiff.patch(basetext, delta)
1286 btext[0] = mdiff.patch(basetext, delta)
1284 try:
1287 try:
1285 self.checkhash(btext[0], p1, p2, node)
1288 self.checkhash(btext[0], p1, p2, node)
1286 if flags & REVIDX_ISCENSORED:
1289 if flags & REVIDX_ISCENSORED:
1287 raise RevlogError(_('node %s is not censored') % node)
1290 raise RevlogError(_('node %s is not censored') % node)
1288 except CensoredNodeError:
1291 except CensoredNodeError:
1289 # must pass the censored index flag to add censored revisions
1292 # must pass the censored index flag to add censored revisions
1290 if not flags & REVIDX_ISCENSORED:
1293 if not flags & REVIDX_ISCENSORED:
1291 raise
1294 raise
1292 return btext[0]
1295 return btext[0]
1293
1296
1294 def builddelta(rev):
1297 def builddelta(rev):
1295 # can we use the cached delta?
1298 # can we use the cached delta?
1296 if cachedelta and cachedelta[0] == rev:
1299 if cachedelta and cachedelta[0] == rev:
1297 delta = cachedelta[1]
1300 delta = cachedelta[1]
1298 else:
1301 else:
1299 t = buildtext()
1302 t = buildtext()
1300 if self.iscensored(rev):
1303 if self.iscensored(rev):
1301 # deltas based on a censored revision must replace the
1304 # deltas based on a censored revision must replace the
1302 # full content in one patch, so delta works everywhere
1305 # full content in one patch, so delta works everywhere
1303 header = mdiff.replacediffheader(self.rawsize(rev), len(t))
1306 header = mdiff.replacediffheader(self.rawsize(rev), len(t))
1304 delta = header + t
1307 delta = header + t
1305 else:
1308 else:
1306 ptext = self.revision(self.node(rev))
1309 ptext = self.revision(self.node(rev))
1307 delta = mdiff.textdiff(ptext, t)
1310 delta = mdiff.textdiff(ptext, t)
1308 data = self.compress(delta)
1311 data = self.compress(delta)
1309 l = len(data[1]) + len(data[0])
1312 l = len(data[1]) + len(data[0])
1310 if basecache[0] == rev:
1313 if basecache[0] == rev:
1311 chainbase = basecache[1]
1314 chainbase = basecache[1]
1312 else:
1315 else:
1313 chainbase = self.chainbase(rev)
1316 chainbase = self.chainbase(rev)
1314 dist = l + offset - self.start(chainbase)
1317 dist = l + offset - self.start(chainbase)
1315 if self._generaldelta:
1318 if self._generaldelta:
1316 base = rev
1319 base = rev
1317 else:
1320 else:
1318 base = chainbase
1321 base = chainbase
1319 chainlen, compresseddeltalen = self._chaininfo(rev)
1322 chainlen, compresseddeltalen = self._chaininfo(rev)
1320 chainlen += 1
1323 chainlen += 1
1321 compresseddeltalen += l
1324 compresseddeltalen += l
1322 return dist, l, data, base, chainbase, chainlen, compresseddeltalen
1325 return dist, l, data, base, chainbase, chainlen, compresseddeltalen
1323
1326
1324 curr = len(self)
1327 curr = len(self)
1325 prev = curr - 1
1328 prev = curr - 1
1326 base = chainbase = curr
1329 base = chainbase = curr
1327 chainlen = None
1330 chainlen = None
1328 offset = self.end(prev)
1331 offset = self.end(prev)
1329 d = None
1332 d = None
1330 if self._basecache is None:
1333 if self._basecache is None:
1331 self._basecache = (prev, self.chainbase(prev))
1334 self._basecache = (prev, self.chainbase(prev))
1332 basecache = self._basecache
1335 basecache = self._basecache
1333 p1r, p2r = self.rev(p1), self.rev(p2)
1336 p1r, p2r = self.rev(p1), self.rev(p2)
1334
1337
1335 # full versions are inserted when the needed deltas
1338 # full versions are inserted when the needed deltas
1336 # become comparable to the uncompressed text
1339 # become comparable to the uncompressed text
1337 if text is None:
1340 if text is None:
1338 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1341 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1339 cachedelta[1])
1342 cachedelta[1])
1340 else:
1343 else:
1341 textlen = len(text)
1344 textlen = len(text)
1342
1345
1343 # should we try to build a delta?
1346 # should we try to build a delta?
1344 if prev != nullrev:
1347 if prev != nullrev:
1345 if self._generaldelta:
1348 if self._generaldelta:
1346 # Pick whichever parent is closer to us (to minimize the
1347 # chance of having to build a fulltext). Since
1348 # nullrev == -1, any non-merge commit will always pick p1r.
1349 drev = p2r if p2r > p1r else p1r
1350 d = builddelta(drev)
1351 # If the chosen delta will result in us making a full text,
1352 # give it one last try against prev.
1353 if drev != prev and not self._isgooddelta(d, textlen):
1354 d = builddelta(prev)
1349 if p2r != nullrev and self._aggressivemergedeltas:
1350 d = builddelta(p1r)
1351 d2 = builddelta(p2r)
1352 p1good = self._isgooddelta(d, textlen)
1353 p2good = self._isgooddelta(d2, textlen)
1354 if p1good and p2good:
1355 # If both are good deltas, choose the smallest
1356 if d2[1] < d[1]:
1357 d = d2
1358 elif p2good:
1359 # If only p2 is good, use it
1360 d = d2
1361 elif p1good:
1362 pass
1363 else:
1364 # Neither is good, try against prev to hopefully save us
1365 # a fulltext.
1366 d = builddelta(prev)
1367 else:
1368 # Pick whichever parent is closer to us (to minimize the
1369 # chance of having to build a fulltext). Since
1370 # nullrev == -1, any non-merge commit will always pick p1r.
1371 drev = p2r if p2r > p1r else p1r
1372 d = builddelta(drev)
1373 # If the chosen delta will result in us making a full text,
1374 # give it one last try against prev.
1375 if drev != prev and not self._isgooddelta(d, textlen):
1376 d = builddelta(prev)
1355 else:
1377 else:
1356 d = builddelta(prev)
1378 d = builddelta(prev)
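# Net effect of the branch above: with aggressive merge deltas enabled, a
# merge revision builds candidate deltas against both parents, keeps the
# smaller of two good candidates (or the only good one), and falls back to a
# delta against prev only when neither parent produced a good delta; without
# the option, the pre-existing behaviour applies -- delta against the
# higher-numbered parent, retrying against prev if that delta is not good.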
1357 dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
1379 dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
1358
1380
1359 if not self._isgooddelta(d, textlen):
1381 if not self._isgooddelta(d, textlen):
1360 text = buildtext()
1382 text = buildtext()
1361 data = self.compress(text)
1383 data = self.compress(text)
1362 l = len(data[1]) + len(data[0])
1384 l = len(data[1]) + len(data[0])
1363 base = chainbase = curr
1385 base = chainbase = curr
1364
1386
1365 e = (offset_type(offset, flags), l, textlen,
1387 e = (offset_type(offset, flags), l, textlen,
1366 base, link, p1r, p2r, node)
1388 base, link, p1r, p2r, node)
1367 self.index.insert(-1, e)
1389 self.index.insert(-1, e)
1368 self.nodemap[node] = curr
1390 self.nodemap[node] = curr
1369
1391
1370 entry = self._io.packentry(e, self.node, self.version, curr)
1392 entry = self._io.packentry(e, self.node, self.version, curr)
1371 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1393 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1372
1394
1373 if type(text) == str: # only accept immutable objects
1395 if type(text) == str: # only accept immutable objects
1374 self._cache = (node, curr, text)
1396 self._cache = (node, curr, text)
1375 self._basecache = (curr, chainbase)
1397 self._basecache = (curr, chainbase)
1376 return node
1398 return node
1377
1399
1378 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1400 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1379 curr = len(self) - 1
1401 curr = len(self) - 1
1380 if not self._inline:
1402 if not self._inline:
1381 transaction.add(self.datafile, offset)
1403 transaction.add(self.datafile, offset)
1382 transaction.add(self.indexfile, curr * len(entry))
1404 transaction.add(self.indexfile, curr * len(entry))
1383 if data[0]:
1405 if data[0]:
1384 dfh.write(data[0])
1406 dfh.write(data[0])
1385 dfh.write(data[1])
1407 dfh.write(data[1])
1386 dfh.flush()
1408 dfh.flush()
1387 ifh.write(entry)
1409 ifh.write(entry)
1388 else:
1410 else:
1389 offset += curr * self._io.size
1411 offset += curr * self._io.size
1390 transaction.add(self.indexfile, offset, curr)
1412 transaction.add(self.indexfile, offset, curr)
1391 ifh.write(entry)
1413 ifh.write(entry)
1392 ifh.write(data[0])
1414 ifh.write(data[0])
1393 ifh.write(data[1])
1415 ifh.write(data[1])
1394 self.checkinlinesize(transaction, ifh)
1416 self.checkinlinesize(transaction, ifh)
1395
1417
1396 def addgroup(self, bundle, linkmapper, transaction, addrevisioncb=None):
1418 def addgroup(self, bundle, linkmapper, transaction, addrevisioncb=None):
1397 """
1419 """
1398 add a delta group
1420 add a delta group
1399
1421
1400 given a set of deltas, add them to the revision log. the
1422 given a set of deltas, add them to the revision log. the
1401 first delta is against its parent, which should be in our
1423 first delta is against its parent, which should be in our
1402 log, the rest are against the previous delta.
1424 log, the rest are against the previous delta.
1403
1425
1404 If ``addrevisioncb`` is defined, it will be called with arguments of
1426 If ``addrevisioncb`` is defined, it will be called with arguments of
1405 this revlog and the node that was added.
1427 this revlog and the node that was added.
1406 """
1428 """
1407
1429
1408 # track the base of the current delta log
1430 # track the base of the current delta log
1409 content = []
1431 content = []
1410 node = None
1432 node = None
1411
1433
1412 r = len(self)
1434 r = len(self)
1413 end = 0
1435 end = 0
1414 if r:
1436 if r:
1415 end = self.end(r - 1)
1437 end = self.end(r - 1)
1416 ifh = self.opener(self.indexfile, "a+")
1438 ifh = self.opener(self.indexfile, "a+")
1417 isize = r * self._io.size
1439 isize = r * self._io.size
1418 if self._inline:
1440 if self._inline:
1419 transaction.add(self.indexfile, end + isize, r)
1441 transaction.add(self.indexfile, end + isize, r)
1420 dfh = None
1442 dfh = None
1421 else:
1443 else:
1422 transaction.add(self.indexfile, isize, r)
1444 transaction.add(self.indexfile, isize, r)
1423 transaction.add(self.datafile, end)
1445 transaction.add(self.datafile, end)
1424 dfh = self.opener(self.datafile, "a")
1446 dfh = self.opener(self.datafile, "a")
1425 def flush():
1447 def flush():
1426 if dfh:
1448 if dfh:
1427 dfh.flush()
1449 dfh.flush()
1428 ifh.flush()
1450 ifh.flush()
1429 try:
1451 try:
1430 # loop through our set of deltas
1452 # loop through our set of deltas
1431 chain = None
1453 chain = None
1432 while True:
1454 while True:
1433 chunkdata = bundle.deltachunk(chain)
1455 chunkdata = bundle.deltachunk(chain)
1434 if not chunkdata:
1456 if not chunkdata:
1435 break
1457 break
1436 node = chunkdata['node']
1458 node = chunkdata['node']
1437 p1 = chunkdata['p1']
1459 p1 = chunkdata['p1']
1438 p2 = chunkdata['p2']
1460 p2 = chunkdata['p2']
1439 cs = chunkdata['cs']
1461 cs = chunkdata['cs']
1440 deltabase = chunkdata['deltabase']
1462 deltabase = chunkdata['deltabase']
1441 delta = chunkdata['delta']
1463 delta = chunkdata['delta']
1442
1464
1443 content.append(node)
1465 content.append(node)
1444
1466
1445 link = linkmapper(cs)
1467 link = linkmapper(cs)
1446 if node in self.nodemap:
1468 if node in self.nodemap:
1447 # this can happen if two branches make the same change
1469 # this can happen if two branches make the same change
1448 chain = node
1470 chain = node
1449 continue
1471 continue
1450
1472
1451 for p in (p1, p2):
1473 for p in (p1, p2):
1452 if p not in self.nodemap:
1474 if p not in self.nodemap:
1453 raise LookupError(p, self.indexfile,
1475 raise LookupError(p, self.indexfile,
1454 _('unknown parent'))
1476 _('unknown parent'))
1455
1477
1456 if deltabase not in self.nodemap:
1478 if deltabase not in self.nodemap:
1457 raise LookupError(deltabase, self.indexfile,
1479 raise LookupError(deltabase, self.indexfile,
1458 _('unknown delta base'))
1480 _('unknown delta base'))
1459
1481
1460 baserev = self.rev(deltabase)
1482 baserev = self.rev(deltabase)
1461
1483
1462 if baserev != nullrev and self.iscensored(baserev):
1484 if baserev != nullrev and self.iscensored(baserev):
1463 # if base is censored, delta must be full replacement in a
1485 # if base is censored, delta must be full replacement in a
1464 # single patch operation
1486 # single patch operation
1465 hlen = struct.calcsize(">lll")
1487 hlen = struct.calcsize(">lll")
1466 oldlen = self.rawsize(baserev)
1488 oldlen = self.rawsize(baserev)
1467 newlen = len(delta) - hlen
1489 newlen = len(delta) - hlen
1468 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
1490 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
1469 raise error.CensoredBaseError(self.indexfile,
1491 raise error.CensoredBaseError(self.indexfile,
1470 self.node(baserev))
1492 self.node(baserev))
1471
1493
1472 flags = REVIDX_DEFAULT_FLAGS
1494 flags = REVIDX_DEFAULT_FLAGS
1473 if self._peek_iscensored(baserev, delta, flush):
1495 if self._peek_iscensored(baserev, delta, flush):
1474 flags |= REVIDX_ISCENSORED
1496 flags |= REVIDX_ISCENSORED
1475
1497
1476 chain = self._addrevision(node, None, transaction, link,
1498 chain = self._addrevision(node, None, transaction, link,
1477 p1, p2, flags, (baserev, delta),
1499 p1, p2, flags, (baserev, delta),
1478 ifh, dfh)
1500 ifh, dfh)
1479
1501
1480 if addrevisioncb:
1502 if addrevisioncb:
1481 # Data for added revision can't be read unless flushed
1503 # Data for added revision can't be read unless flushed
1482 # because _loadchunk always opens a new file handle and
1504 # because _loadchunk always opens a new file handle and
1483 # there is no guarantee data was actually written yet.
1505 # there is no guarantee data was actually written yet.
1484 flush()
1506 flush()
1485 addrevisioncb(self, chain)
1507 addrevisioncb(self, chain)
1486
1508
1487 if not dfh and not self._inline:
1509 if not dfh and not self._inline:
1488 # addrevision switched from inline to conventional
1510 # addrevision switched from inline to conventional
1489 # reopen the index
1511 # reopen the index
1490 ifh.close()
1512 ifh.close()
1491 dfh = self.opener(self.datafile, "a")
1513 dfh = self.opener(self.datafile, "a")
1492 ifh = self.opener(self.indexfile, "a")
1514 ifh = self.opener(self.indexfile, "a")
1493 finally:
1515 finally:
1494 if dfh:
1516 if dfh:
1495 dfh.close()
1517 dfh.close()
1496 ifh.close()
1518 ifh.close()
1497
1519
1498 return content
1520 return content
1499
1521
1500 def iscensored(self, rev):
1522 def iscensored(self, rev):
1501 """Check if a file revision is censored."""
1523 """Check if a file revision is censored."""
1502 return False
1524 return False
1503
1525
1504 def _peek_iscensored(self, baserev, delta, flush):
1526 def _peek_iscensored(self, baserev, delta, flush):
1505 """Quickly check if a delta produces a censored revision."""
1527 """Quickly check if a delta produces a censored revision."""
1506 return False
1528 return False
1507
1529
1508 def getstrippoint(self, minlink):
1530 def getstrippoint(self, minlink):
1509 """find the minimum rev that must be stripped to strip the linkrev
1531 """find the minimum rev that must be stripped to strip the linkrev
1510
1532
1511 Returns a tuple containing the minimum rev and a set of all revs that
1533 Returns a tuple containing the minimum rev and a set of all revs that
1512 have linkrevs that will be broken by this strip.
1534 have linkrevs that will be broken by this strip.
1513 """
1535 """
1514 brokenrevs = set()
1536 brokenrevs = set()
1515 strippoint = len(self)
1537 strippoint = len(self)
1516
1538
1517 heads = {}
1539 heads = {}
1518 futurelargelinkrevs = set()
1540 futurelargelinkrevs = set()
1519 for head in self.headrevs():
1541 for head in self.headrevs():
1520 headlinkrev = self.linkrev(head)
1542 headlinkrev = self.linkrev(head)
1521 heads[head] = headlinkrev
1543 heads[head] = headlinkrev
1522 if headlinkrev >= minlink:
1544 if headlinkrev >= minlink:
1523 futurelargelinkrevs.add(headlinkrev)
1545 futurelargelinkrevs.add(headlinkrev)
1524
1546
1525 # This algorithm involves walking down the rev graph, starting at the
1547 # This algorithm involves walking down the rev graph, starting at the
1526 # heads. Since the revs are topologically sorted according to linkrev,
1548 # heads. Since the revs are topologically sorted according to linkrev,
1527 # once all head linkrevs are below the minlink, we know there are
1549 # once all head linkrevs are below the minlink, we know there are
1528 # no more revs that could have a linkrev greater than minlink.
1550 # no more revs that could have a linkrev greater than minlink.
1529 # So we can stop walking.
1551 # So we can stop walking.
1530 while futurelargelinkrevs:
1552 while futurelargelinkrevs:
1531 strippoint -= 1
1553 strippoint -= 1
1532 linkrev = heads.pop(strippoint)
1554 linkrev = heads.pop(strippoint)
1533
1555
1534 if linkrev < minlink:
1556 if linkrev < minlink:
1535 brokenrevs.add(strippoint)
1557 brokenrevs.add(strippoint)
1536 else:
1558 else:
1537 futurelargelinkrevs.remove(linkrev)
1559 futurelargelinkrevs.remove(linkrev)
1538
1560
1539 for p in self.parentrevs(strippoint):
1561 for p in self.parentrevs(strippoint):
1540 if p != nullrev:
1562 if p != nullrev:
1541 plinkrev = self.linkrev(p)
1563 plinkrev = self.linkrev(p)
1542 heads[p] = plinkrev
1564 heads[p] = plinkrev
1543 if plinkrev >= minlink:
1565 if plinkrev >= minlink:
1544 futurelargelinkrevs.add(plinkrev)
1566 futurelargelinkrevs.add(plinkrev)
1545
1567
1546 return strippoint, brokenrevs
1568 return strippoint, brokenrevs
1547
1569
1548 def strip(self, minlink, transaction):
1570 def strip(self, minlink, transaction):
1549 """truncate the revlog on the first revision with a linkrev >= minlink
1571 """truncate the revlog on the first revision with a linkrev >= minlink
1550
1572
1551 This function is called when we're stripping revision minlink and
1573 This function is called when we're stripping revision minlink and
1552 its descendants from the repository.
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res
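As an aside, the truncation rule that the strip() docstring above describes can be condensed into a small standalone sketch. The helper below is purely illustrative (a hypothetical function, not part of revlog): the strip point is the first revision whose linkrev is at or above minlink, and that revision plus everything after it is dropped.

    # Minimal sketch of the rule described in the docstring above; not
    # Mercurial's implementation.  linkrevs maps revision number -> linkrev.
    def strip_point(linkrevs, minlink):
        for rev, linkrev in enumerate(linkrevs):
            if linkrev >= minlink:
                return rev            # truncate here; rev and later revs go
        return len(linkrevs)          # nothing needs stripping

    # Example: strip_point([0, 1, 2, 3, 4], 3) == 3, so revisions 3 and 4 are
    # removed, and the caller re-adds any revisions it saved beforehand.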
@@ -1,71 +1,105 @@
Check whether the size of a generaldelta revlog is not bigger than its
regular equivalent. The test would fail if generaldelta were a naive
implementation of parentdelta: the third manifest revision would be fully
inserted due to the big distance from its parent revision (zero).

  $ hg init repo
  $ cd repo
  $ echo foo > foo
  $ echo bar > bar
  $ hg commit -q -Am boo
  $ hg clone --pull . ../gdrepo -q --config format.generaldelta=yes
  $ for r in 1 2 3; do
  > echo $r > foo
  > hg commit -q -m $r
  > hg up -q -r 0
  > hg pull . -q -r $r -R ../gdrepo
  > done

  $ cd ..
  >>> import os
  >>> regsize = os.stat("repo/.hg/store/00manifest.i").st_size
  >>> gdsize = os.stat("gdrepo/.hg/store/00manifest.i").st_size
  >>> if regsize < gdsize:
  ...     print 'generaldata increased size of manifest'

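A toy model may help explain why the generaldelta clone cannot come out larger here. This is an illustration only (a hypothetical function, not the revlog API): generaldelta bases each new delta on the revision's parent, while the classic layout bases it on whatever revision happens to be stored immediately before it.

    # Toy model of the delta-base choice; not revlog internals.
    def delta_base(rev, parent_of, generaldelta):
        if generaldelta:
            return parent_of[rev]   # base the delta on the parent revision
        return rev - 1              # classic layout: base on the previous revision

With generaldelta the storage order no longer dictates delta quality, and the size comparison above asserts that this never costs space for the manifest in this scenario.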
Verify rev reordering doesn't create invalid bundles (issue4462).
This requires a commit tree that, when pulled, will reorder manifest revs such
that the second manifest to create a file rev will be ordered before the first
manifest to create that file rev. We also need to do a partial pull to ensure
reordering happens. At the end we verify the linkrev points at the earliest
commit.

  $ hg init server --config format.generaldelta=True
  $ cd server
  $ touch a
  $ hg commit -Aqm a
  $ echo x > x
  $ echo y > y
  $ hg commit -Aqm xy
  $ hg up -q '.^'
  $ echo x > x
  $ echo z > z
  $ hg commit -Aqm xz
  $ hg up -q 1
  $ echo b > b
  $ hg commit -Aqm b
  $ hg merge -q 2
  $ hg commit -Aqm merge
  $ echo c > c
  $ hg commit -Aqm c
  $ hg log -G -T '{rev} {shortest(node)} {desc}'
  @ 5 ebb8 c
  |
  o 4 baf7 merge
  |\
  | o 3 a129 b
  | |
  o | 2 958c xz
  | |
  | o 1 f00c xy
  |/
  o 0 3903 a

  $ cd ..
  $ hg init client
  $ cd client
  $ hg pull -q ../server -r 4
  $ hg debugindex x
  rev offset length base linkrev nodeid p1 p2
  0 0 3 0 1 1406e7411862 000000000000 000000000000

  $ cd ..

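To spell out the property that the debugindex output checks (the helper name below is hypothetical, for illustration only): when the same file revision is introduced by more than one changeset, its linkrev must point at the earliest introducing changeset that is actually present in the repository.

    # Illustration only; not a Mercurial API.
    def expected_linkrev(introducing_changelog_revs, present_revs):
        return min(r for r in introducing_changelog_revs if r in present_revs)

Here file revision 0 of 'x' is created by both changeset 1 ("xy") and changeset 2 ("xz"); after the partial pull of -r 4 both are present, so debugindex must report linkrev 1, the earlier of the two.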
Test format.aggressivemergedeltas

  $ hg init --config format.generaldelta=1 aggressive
  $ cd aggressive
  $ touch a b c d e
  $ hg commit -Aqm side1
  $ hg up -q null
  $ touch x y
  $ hg commit -Aqm side2

- Verify non-aggressive merge uses p1 (commit 1) as delta parent
  $ hg merge -q 0
  $ hg commit -q -m merge
  $ hg debugindex -m
  rev offset length delta linkrev nodeid p1 p2
  0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
  1 59 59 -1 1 315c023f341d 000000000000 000000000000
  2 118 65 1 2 2ab389a983eb 315c023f341d 8dde941edb6e

  $ hg strip -q -r . --config extensions.strip=

- Verify aggressive merge uses p2 (commit 0) as delta parent
  $ hg up -q -C 1
  $ hg merge -q 0
  $ hg commit -q -m merge --config format.aggressivemergedeltas=True
  $ hg debugindex -m
  rev offset length delta linkrev nodeid p1 p2
  0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
  1 59 59 -1 1 315c023f341d 000000000000 000000000000
  2 118 62 0 2 2ab389a983eb 315c023f341d 8dde941edb6e

  $ cd ..
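The two debugindex runs above show the option's effect on the delta parent of the merge revision: without it the merge is stored as a delta against p1 (rev 1), with it against p2 (rev 0), giving a 62-byte delta instead of 65. A rough sketch of the behaviour this suggests (an inference from the test output, not the actual revlog code) follows.

    # Rough sketch of what format.aggressivemergedeltas appears to do; this is
    # an assumption drawn from the test output, not revlog internals.
    def choose_merge_delta(p1_text, p2_text, new_text, aggressive, make_delta):
        d1 = make_delta(p1_text, new_text)
        if not aggressive:
            return 'p1', d1                 # default: delta against p1 only
        d2 = make_delta(p2_text, new_text)
        # try both parents and keep whichever delta is smaller
        return ('p1', d1) if len(d1) <= len(d2) else ('p2', d2)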