transaction: track new obsmarkers in the 'changes' mapping...
marmoute
r33248:a5cb2e44 default
@@ -1,2110 +1,2111 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repoview,
    revset,
    revsetlang,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

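# Illustrative usage sketch (not part of upstream localrepo.py): how the
# decorator above is meant to be applied. `myrepo` and `mything` are
# hypothetical names added for illustration only.
#
#     class myrepo(localrepository):
#         @unfilteredmethod
#         def mything(self):
#             # always runs against repo.unfiltered(), never a filtered view
#             return len(self.changelog)
#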
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

class localrepository(object):

    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = vfsmod.vfs(self.path)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, vfsmod.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

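    # Illustrative sketch (not part of upstream localrepo.py): the options
    # read above correspond to hgrc settings such as the following. The
    # section/option names come from the code above; the values are made-up
    # examples, not recommendations.
    #
    #     [format]
    #     chunkcachesize = 65536
    #     maxchainlen = 1000
    #     [experimental]
    #     maxdeltachainspan = 4M
    #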
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        # $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class filteredrepo(repoview.repoview, self.unfiltered().__class__):
            pass
        return filteredrepo(self, name)

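    # Illustrative usage sketch (not part of upstream localrepo.py).
    # Filtered views are obtained by name; the names below are the ones used
    # elsewhere in this file:
    #
    #     served = repo.filtered('served')    # view exposed to peers
    #     visible = repo.filtered('visible')  # hides obsolete changesets
    #     unfi = repo.unfiltered()            # raw, unfiltered repository
    #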
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

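    # Illustrative usage sketch (not part of upstream localrepo.py). The
    # container protocol defined above allows, for example:
    #
    #     ctx = repo['tip']       # changectx lookup by rev, node or tag
    #     if node in repo:        # membership test via __contains__
    #         pass
    #     nrevs = len(repo)       # number of revisions (__len__)
    #     for rev in repo:        # iterate revision numbers (__iter__)
    #         pass
    #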
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self)
        else:
            m = revset.matchany(None, specs)
        return m(self)

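    # Illustrative usage sketch (not part of upstream localrepo.py), following
    # the docstrings above; the revset strings are examples only:
    #
    #     revs = repo.revs('ancestors(%n)', somenode)   # smartset of ints
    #     for ctx in repo.set('head() and not closed()'):
    #         pass                                      # changectx instances
    #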
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

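    # Illustrative usage sketch (not part of upstream localrepo.py); the hook
    # name and keyword argument are examples only:
    #
    #     repo.hook('pretxncommit', throw=True, node=hex(node))
    #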
653 @filteredpropertycache
653 @filteredpropertycache
654 def _tagscache(self):
654 def _tagscache(self):
655 '''Returns a tagscache object that contains various tags related
655 '''Returns a tagscache object that contains various tags related
656 caches.'''
656 caches.'''
657
657
658 # This simplifies its cache management by having one decorated
658 # This simplifies its cache management by having one decorated
659 # function (this one) and the rest simply fetch things from it.
659 # function (this one) and the rest simply fetch things from it.
660 class tagscache(object):
660 class tagscache(object):
661 def __init__(self):
661 def __init__(self):
662 # These two define the set of tags for this repository. tags
662 # These two define the set of tags for this repository. tags
663 # maps tag name to node; tagtypes maps tag name to 'global' or
663 # maps tag name to node; tagtypes maps tag name to 'global' or
664 # 'local'. (Global tags are defined by .hgtags across all
664 # 'local'. (Global tags are defined by .hgtags across all
665 # heads, and local tags are defined in .hg/localtags.)
665 # heads, and local tags are defined in .hg/localtags.)
666 # They constitute the in-memory cache of tags.
666 # They constitute the in-memory cache of tags.
667 self.tags = self.tagtypes = None
667 self.tags = self.tagtypes = None
668
668
669 self.nodetagscache = self.tagslist = None
669 self.nodetagscache = self.tagslist = None
670
670
671 cache = tagscache()
671 cache = tagscache()
672 cache.tags, cache.tagtypes = self._findtags()
672 cache.tags, cache.tagtypes = self._findtags()
673
673
674 return cache
674 return cache
675
675
676 def tags(self):
676 def tags(self):
677 '''return a mapping of tag to node'''
677 '''return a mapping of tag to node'''
678 t = {}
678 t = {}
679 if self.changelog.filteredrevs:
679 if self.changelog.filteredrevs:
680 tags, tt = self._findtags()
680 tags, tt = self._findtags()
681 else:
681 else:
682 tags = self._tagscache.tags
682 tags = self._tagscache.tags
683 for k, v in tags.iteritems():
683 for k, v in tags.iteritems():
684 try:
684 try:
685 # ignore tags to unknown nodes
685 # ignore tags to unknown nodes
686 self.changelog.rev(v)
686 self.changelog.rev(v)
687 t[k] = v
687 t[k] = v
688 except (error.LookupError, ValueError):
688 except (error.LookupError, ValueError):
689 pass
689 pass
690 return t
690 return t
691
691
692 def _findtags(self):
692 def _findtags(self):
693 '''Do the hard work of finding tags. Return a pair of dicts
693 '''Do the hard work of finding tags. Return a pair of dicts
694 (tags, tagtypes) where tags maps tag name to node, and tagtypes
694 (tags, tagtypes) where tags maps tag name to node, and tagtypes
695 maps tag name to a string like \'global\' or \'local\'.
695 maps tag name to a string like \'global\' or \'local\'.
696 Subclasses or extensions are free to add their own tags, but
696 Subclasses or extensions are free to add their own tags, but
697 should be aware that the returned dicts will be retained for the
697 should be aware that the returned dicts will be retained for the
698 duration of the localrepo object.'''
698 duration of the localrepo object.'''
699
699
700 # XXX what tagtype should subclasses/extensions use? Currently
700 # XXX what tagtype should subclasses/extensions use? Currently
701 # mq and bookmarks add tags, but do not set the tagtype at all.
701 # mq and bookmarks add tags, but do not set the tagtype at all.
702 # Should each extension invent its own tag type? Should there
702 # Should each extension invent its own tag type? Should there
703 # be one tagtype for all such "virtual" tags? Or is the status
703 # be one tagtype for all such "virtual" tags? Or is the status
704 # quo fine?
704 # quo fine?
705
705
706
706
707 # map tag name to (node, hist)
707 # map tag name to (node, hist)
708 alltags = tagsmod.findglobaltags(self.ui, self)
708 alltags = tagsmod.findglobaltags(self.ui, self)
709 # map tag name to tag type
709 # map tag name to tag type
710 tagtypes = dict((tag, 'global') for tag in alltags)
710 tagtypes = dict((tag, 'global') for tag in alltags)
711
711
712 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
712 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
713
713
714 # Build the return dicts. Have to re-encode tag names because
714 # Build the return dicts. Have to re-encode tag names because
715 # the tags module always uses UTF-8 (in order not to lose info
715 # the tags module always uses UTF-8 (in order not to lose info
716 # writing to the cache), but the rest of Mercurial wants them in
716 # writing to the cache), but the rest of Mercurial wants them in
717 # local encoding.
717 # local encoding.
718 tags = {}
718 tags = {}
719 for (name, (node, hist)) in alltags.iteritems():
719 for (name, (node, hist)) in alltags.iteritems():
720 if node != nullid:
720 if node != nullid:
721 tags[encoding.tolocal(name)] = node
721 tags[encoding.tolocal(name)] = node
722 tags['tip'] = self.changelog.tip()
722 tags['tip'] = self.changelog.tip()
723 tagtypes = dict([(encoding.tolocal(name), value)
723 tagtypes = dict([(encoding.tolocal(name), value)
724 for (name, value) in tagtypes.iteritems()])
724 for (name, value) in tagtypes.iteritems()])
725 return (tags, tagtypes)
725 return (tags, tagtypes)
726
726
727 def tagtype(self, tagname):
727 def tagtype(self, tagname):
728 '''
728 '''
729 return the type of the given tag. result can be:
729 return the type of the given tag. result can be:
730
730
731 'local' : a local tag
731 'local' : a local tag
732 'global' : a global tag
732 'global' : a global tag
733 None : tag does not exist
733 None : tag does not exist
734 '''
734 '''
735
735
736 return self._tagscache.tagtypes.get(tagname)
736 return self._tagscache.tagtypes.get(tagname)
737
737
738 def tagslist(self):
738 def tagslist(self):
739 '''return a list of tags ordered by revision'''
739 '''return a list of tags ordered by revision'''
740 if not self._tagscache.tagslist:
740 if not self._tagscache.tagslist:
741 l = []
741 l = []
742 for t, n in self.tags().iteritems():
742 for t, n in self.tags().iteritems():
743 l.append((self.changelog.rev(n), t, n))
743 l.append((self.changelog.rev(n), t, n))
744 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
744 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
745
745
746 return self._tagscache.tagslist
746 return self._tagscache.tagslist
747
747
748 def nodetags(self, node):
748 def nodetags(self, node):
749 '''return the tags associated with a node'''
749 '''return the tags associated with a node'''
750 if not self._tagscache.nodetagscache:
750 if not self._tagscache.nodetagscache:
751 nodetagscache = {}
751 nodetagscache = {}
752 for t, n in self._tagscache.tags.iteritems():
752 for t, n in self._tagscache.tags.iteritems():
753 nodetagscache.setdefault(n, []).append(t)
753 nodetagscache.setdefault(n, []).append(t)
754 for tags in nodetagscache.itervalues():
754 for tags in nodetagscache.itervalues():
755 tags.sort()
755 tags.sort()
756 self._tagscache.nodetagscache = nodetagscache
756 self._tagscache.nodetagscache = nodetagscache
757 return self._tagscache.nodetagscache.get(node, [])
757 return self._tagscache.nodetagscache.get(node, [])
758
758
759 def nodebookmarks(self, node):
759 def nodebookmarks(self, node):
760 """return the list of bookmarks pointing to the specified node"""
760 """return the list of bookmarks pointing to the specified node"""
761 marks = []
761 marks = []
762 for bookmark, n in self._bookmarks.iteritems():
762 for bookmark, n in self._bookmarks.iteritems():
763 if n == node:
763 if n == node:
764 marks.append(bookmark)
764 marks.append(bookmark)
765 return sorted(marks)
765 return sorted(marks)
766
766
767 def branchmap(self):
767 def branchmap(self):
768 '''returns a dictionary {branch: [branchheads]} with branchheads
768 '''returns a dictionary {branch: [branchheads]} with branchheads
769 ordered by increasing revision number'''
769 ordered by increasing revision number'''
770 branchmap.updatecache(self)
770 branchmap.updatecache(self)
771 return self._branchcaches[self.filtername]
771 return self._branchcaches[self.filtername]
772
772
773 @unfilteredmethod
773 @unfilteredmethod
774 def revbranchcache(self):
774 def revbranchcache(self):
775 if not self._revbranchcache:
775 if not self._revbranchcache:
776 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
776 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
777 return self._revbranchcache
777 return self._revbranchcache
778
778
779 def branchtip(self, branch, ignoremissing=False):
779 def branchtip(self, branch, ignoremissing=False):
780 '''return the tip node for a given branch
780 '''return the tip node for a given branch
781
781
782 If ignoremissing is True, then this method will not raise an error.
782 If ignoremissing is True, then this method will not raise an error.
783 This is helpful for callers that only expect None for a missing branch
783 This is helpful for callers that only expect None for a missing branch
784 (e.g. namespace).
784 (e.g. namespace).
785
785
786 '''
786 '''
787 try:
787 try:
788 return self.branchmap().branchtip(branch)
788 return self.branchmap().branchtip(branch)
789 except KeyError:
789 except KeyError:
790 if not ignoremissing:
790 if not ignoremissing:
791 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
791 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
792 else:
792 else:
793 pass
793 pass
794
794
795 def lookup(self, key):
795 def lookup(self, key):
796 return self[key].node()
796 return self[key].node()
797
797
798 def lookupbranch(self, key, remote=None):
798 def lookupbranch(self, key, remote=None):
799 repo = remote or self
799 repo = remote or self
800 if key in repo.branchmap():
800 if key in repo.branchmap():
801 return key
801 return key
802
802
803 repo = (remote and remote.local()) and remote or self
803 repo = (remote and remote.local()) and remote or self
804 return repo[key].branch()
804 return repo[key].branch()
805
805
806 def known(self, nodes):
806 def known(self, nodes):
807 cl = self.changelog
807 cl = self.changelog
808 nm = cl.nodemap
808 nm = cl.nodemap
809 filtered = cl.filteredrevs
809 filtered = cl.filteredrevs
810 result = []
810 result = []
811 for n in nodes:
811 for n in nodes:
812 r = nm.get(n)
812 r = nm.get(n)
813 resp = not (r is None or r in filtered)
813 resp = not (r is None or r in filtered)
814 result.append(resp)
814 result.append(resp)
815 return result
815 return result
816
816
817 def local(self):
817 def local(self):
818 return self
818 return self
819
819
820 def publishing(self):
820 def publishing(self):
821 # it's safe (and desirable) to trust the publish flag unconditionally
821 # it's safe (and desirable) to trust the publish flag unconditionally
822 # so that we don't finalize changes shared between users via ssh or nfs
822 # so that we don't finalize changes shared between users via ssh or nfs
823 return self.ui.configbool('phases', 'publish', True, untrusted=True)
823 return self.ui.configbool('phases', 'publish', True, untrusted=True)
824
824
825 def cancopy(self):
825 def cancopy(self):
826 # so statichttprepo's override of local() works
826 # so statichttprepo's override of local() works
827 if not self.local():
827 if not self.local():
828 return False
828 return False
829 if not self.publishing():
829 if not self.publishing():
830 return True
830 return True
831 # if publishing we can't copy if there is filtered content
831 # if publishing we can't copy if there is filtered content
832 return not self.filtered('visible').changelog.filteredrevs
832 return not self.filtered('visible').changelog.filteredrevs
833
833
834 def shared(self):
834 def shared(self):
835 '''the type of shared repository (None if not shared)'''
835 '''the type of shared repository (None if not shared)'''
836 if self.sharedpath != self.path:
836 if self.sharedpath != self.path:
837 return 'store'
837 return 'store'
838 return None
838 return None
839
839
840 def wjoin(self, f, *insidef):
840 def wjoin(self, f, *insidef):
841 return self.vfs.reljoin(self.root, f, *insidef)
841 return self.vfs.reljoin(self.root, f, *insidef)
842
842
843 def file(self, f):
843 def file(self, f):
844 if f[0] == '/':
844 if f[0] == '/':
845 f = f[1:]
845 f = f[1:]
846 return filelog.filelog(self.svfs, f)
846 return filelog.filelog(self.svfs, f)
847
847
848 def changectx(self, changeid):
848 def changectx(self, changeid):
849 return self[changeid]
849 return self[changeid]
850
850
851 def setparents(self, p1, p2=nullid):
851 def setparents(self, p1, p2=nullid):
852 with self.dirstate.parentchange():
852 with self.dirstate.parentchange():
853 copies = self.dirstate.setparents(p1, p2)
853 copies = self.dirstate.setparents(p1, p2)
854 pctx = self[p1]
854 pctx = self[p1]
855 if copies:
855 if copies:
856 # Adjust copy records, the dirstate cannot do it, it
856 # Adjust copy records, the dirstate cannot do it, it
857 # requires access to parents manifests. Preserve them
857 # requires access to parents manifests. Preserve them
858 # only for entries added to first parent.
858 # only for entries added to first parent.
859 for f in copies:
859 for f in copies:
860 if f not in pctx and copies[f] in pctx:
860 if f not in pctx and copies[f] in pctx:
861 self.dirstate.copy(copies[f], f)
861 self.dirstate.copy(copies[f], f)
862 if p2 == nullid:
862 if p2 == nullid:
863 for f, s in sorted(self.dirstate.copies().items()):
863 for f, s in sorted(self.dirstate.copies().items()):
864 if f not in pctx and s not in pctx:
864 if f not in pctx and s not in pctx:
865 self.dirstate.copy(None, f)
865 self.dirstate.copy(None, f)
866
866
867 def filectx(self, path, changeid=None, fileid=None):
867 def filectx(self, path, changeid=None, fileid=None):
868 """changeid can be a changeset revision, node, or tag.
868 """changeid can be a changeset revision, node, or tag.
869 fileid can be a file revision or node."""
869 fileid can be a file revision or node."""
870 return context.filectx(self, path, changeid, fileid)
870 return context.filectx(self, path, changeid, fileid)
871
871
872 def getcwd(self):
872 def getcwd(self):
873 return self.dirstate.getcwd()
873 return self.dirstate.getcwd()
874
874
875 def pathto(self, f, cwd=None):
875 def pathto(self, f, cwd=None):
876 return self.dirstate.pathto(f, cwd)
876 return self.dirstate.pathto(f, cwd)
877
877
878 def _loadfilter(self, filter):
878 def _loadfilter(self, filter):
879 if filter not in self.filterpats:
879 if filter not in self.filterpats:
880 l = []
880 l = []
881 for pat, cmd in self.ui.configitems(filter):
881 for pat, cmd in self.ui.configitems(filter):
882 if cmd == '!':
882 if cmd == '!':
883 continue
883 continue
884 mf = matchmod.match(self.root, '', [pat])
884 mf = matchmod.match(self.root, '', [pat])
885 fn = None
885 fn = None
886 params = cmd
886 params = cmd
887 for name, filterfn in self._datafilters.iteritems():
887 for name, filterfn in self._datafilters.iteritems():
888 if cmd.startswith(name):
888 if cmd.startswith(name):
889 fn = filterfn
889 fn = filterfn
890 params = cmd[len(name):].lstrip()
890 params = cmd[len(name):].lstrip()
891 break
891 break
892 if not fn:
892 if not fn:
893 fn = lambda s, c, **kwargs: util.filter(s, c)
893 fn = lambda s, c, **kwargs: util.filter(s, c)
894 # Wrap old filters not supporting keyword arguments
894 # Wrap old filters not supporting keyword arguments
895 if not inspect.getargspec(fn)[2]:
895 if not inspect.getargspec(fn)[2]:
896 oldfn = fn
896 oldfn = fn
897 fn = lambda s, c, **kwargs: oldfn(s, c)
897 fn = lambda s, c, **kwargs: oldfn(s, c)
898 l.append((mf, fn, params))
898 l.append((mf, fn, params))
899 self.filterpats[filter] = l
899 self.filterpats[filter] = l
900 return self.filterpats[filter]
900 return self.filterpats[filter]
901
901
902 def _filter(self, filterpats, filename, data):
902 def _filter(self, filterpats, filename, data):
903 for mf, fn, cmd in filterpats:
903 for mf, fn, cmd in filterpats:
904 if mf(filename):
904 if mf(filename):
905 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
905 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
906 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
906 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
907 break
907 break
908
908
909 return data
909 return data
910
910
911 @unfilteredpropertycache
911 @unfilteredpropertycache
912 def _encodefilterpats(self):
912 def _encodefilterpats(self):
913 return self._loadfilter('encode')
913 return self._loadfilter('encode')
914
914
915 @unfilteredpropertycache
915 @unfilteredpropertycache
916 def _decodefilterpats(self):
916 def _decodefilterpats(self):
917 return self._loadfilter('decode')
917 return self._loadfilter('decode')
918
918
919 def adddatafilter(self, name, filter):
919 def adddatafilter(self, name, filter):
920 self._datafilters[name] = filter
920 self._datafilters[name] = filter
921
921
922 def wread(self, filename):
922 def wread(self, filename):
923 if self.wvfs.islink(filename):
923 if self.wvfs.islink(filename):
924 data = self.wvfs.readlink(filename)
924 data = self.wvfs.readlink(filename)
925 else:
925 else:
926 data = self.wvfs.read(filename)
926 data = self.wvfs.read(filename)
927 return self._filter(self._encodefilterpats, filename, data)
927 return self._filter(self._encodefilterpats, filename, data)
928
928
929 def wwrite(self, filename, data, flags, backgroundclose=False):
929 def wwrite(self, filename, data, flags, backgroundclose=False):
930 """write ``data`` into ``filename`` in the working directory
930 """write ``data`` into ``filename`` in the working directory
931
931
932 This returns length of written (maybe decoded) data.
932 This returns length of written (maybe decoded) data.
933 """
933 """
934 data = self._filter(self._decodefilterpats, filename, data)
934 data = self._filter(self._decodefilterpats, filename, data)
935 if 'l' in flags:
935 if 'l' in flags:
936 self.wvfs.symlink(data, filename)
936 self.wvfs.symlink(data, filename)
937 else:
937 else:
938 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
938 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
939 if 'x' in flags:
939 if 'x' in flags:
940 self.wvfs.setflags(filename, False, True)
940 self.wvfs.setflags(filename, False, True)
941 return len(data)
941 return len(data)
942
942
943 def wwritedata(self, filename, data):
943 def wwritedata(self, filename, data):
944 return self._filter(self._decodefilterpats, filename, data)
944 return self._filter(self._decodefilterpats, filename, data)
945
945
946 def currenttransaction(self):
946 def currenttransaction(self):
947 """return the current transaction or None if non exists"""
947 """return the current transaction or None if non exists"""
948 if self._transref:
948 if self._transref:
949 tr = self._transref()
949 tr = self._transref()
950 else:
950 else:
951 tr = None
951 tr = None
952
952
953 if tr and tr.running():
953 if tr and tr.running():
954 return tr
954 return tr
955 return None
955 return None
956
956
957 def transaction(self, desc, report=None):
957 def transaction(self, desc, report=None):
958 if (self.ui.configbool('devel', 'all-warnings')
958 if (self.ui.configbool('devel', 'all-warnings')
959 or self.ui.configbool('devel', 'check-locks')):
959 or self.ui.configbool('devel', 'check-locks')):
960 if self._currentlock(self._lockref) is None:
960 if self._currentlock(self._lockref) is None:
961 raise error.ProgrammingError('transaction requires locking')
961 raise error.ProgrammingError('transaction requires locking')
962 tr = self.currenttransaction()
962 tr = self.currenttransaction()
963 if tr is not None:
963 if tr is not None:
964 return tr.nest()
964 return tr.nest()
965
965
966 # abort here if the journal already exists
966 # abort here if the journal already exists
967 if self.svfs.exists("journal"):
967 if self.svfs.exists("journal"):
968 raise error.RepoError(
968 raise error.RepoError(
969 _("abandoned transaction found"),
969 _("abandoned transaction found"),
970 hint=_("run 'hg recover' to clean up transaction"))
970 hint=_("run 'hg recover' to clean up transaction"))
971
971
972 idbase = "%.40f#%f" % (random.random(), time.time())
972 idbase = "%.40f#%f" % (random.random(), time.time())
973 ha = hex(hashlib.sha1(idbase).digest())
973 ha = hex(hashlib.sha1(idbase).digest())
974 txnid = 'TXN:' + ha
974 txnid = 'TXN:' + ha
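# the transaction id is only meant to be a unique token; it is derived from
# random data and handed to the pretxnopen hook just below and, via
# tr.hookargs, to the txnclose/txnabort hooks so runs belonging to the same
# transaction can be correlated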
975 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
975 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
976
976
977 self._writejournal(desc)
977 self._writejournal(desc)
978 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
978 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
979 if report:
979 if report:
980 rp = report
980 rp = report
981 else:
981 else:
982 rp = self.ui.warn
982 rp = self.ui.warn
983 vfsmap = {'plain': self.vfs} # root of .hg/
983 vfsmap = {'plain': self.vfs} # root of .hg/
984 # we must avoid cyclic reference between repo and transaction.
984 # we must avoid cyclic reference between repo and transaction.
985 reporef = weakref.ref(self)
985 reporef = weakref.ref(self)
986 # Code to track tag movement
986 # Code to track tag movement
987 #
987 #
988 # Since tags are all handled as file content, it is actually quite hard
988 # Since tags are all handled as file content, it is actually quite hard
989 # to track these movements from a code perspective. So we fall back to
989 # to track these movements from a code perspective. So we fall back to
990 # tracking at the repository level. One could envision tracking changes
990 # tracking at the repository level. One could envision tracking changes
991 # to the '.hgtags' file through changegroup application, but that fails to
991 # to the '.hgtags' file through changegroup application, but that fails to
992 # cope with cases where a transaction exposes new heads without a changegroup
992 # cope with cases where a transaction exposes new heads without a changegroup
993 # being involved (eg: phase movement).
993 # being involved (eg: phase movement).
994 #
994 #
995 # For now, we gate the feature behind a flag since this likely comes
995 # For now, we gate the feature behind a flag since this likely comes
996 # with performance impacts. The current code runs more often than needed
996 # with performance impacts. The current code runs more often than needed
997 # and does not use caches as much as it could. The current focus is on
997 # and does not use caches as much as it could. The current focus is on
998 # the behavior of the feature so we disable it by default. The flag
998 # the behavior of the feature so we disable it by default. The flag
999 # will be removed when we are happy with the performance impact.
999 # will be removed when we are happy with the performance impact.
1000 #
1000 #
1001 # Once this feature is no longer experimental, move the following
1001 # Once this feature is no longer experimental, move the following
1002 # documentation to the appropriate help section:
1002 # documentation to the appropriate help section:
1003 #
1003 #
1004 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1004 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1005 # tags (new or changed or deleted tags). In addition the details of
1005 # tags (new or changed or deleted tags). In addition the details of
1006 # these changes are made available in a file at:
1006 # these changes are made available in a file at:
1007 # ``REPOROOT/.hg/changes/tags.changes``.
1007 # ``REPOROOT/.hg/changes/tags.changes``.
1008 # Make sure you check for HG_TAG_MOVED before reading that file as it
1008 # Make sure you check for HG_TAG_MOVED before reading that file as it
1009 # might exist from a previous transaction even if no tags were touched
1009 # might exist from a previous transaction even if no tags were touched
1010 # in this one. Changes are recorded in a line-based format::
1010 # in this one. Changes are recorded in a line-based format::
1011 #
1011 #
1012 # <action> <hex-node> <tag-name>\n
1012 # <action> <hex-node> <tag-name>\n
1013 #
1013 #
1014 # Actions are defined as follows:
1014 # Actions are defined as follows:
1015 # "-R": tag is removed,
1015 # "-R": tag is removed,
1016 # "+A": tag is added,
1016 # "+A": tag is added,
1017 # "-M": tag is moved (old value),
1017 # "-M": tag is moved (old value),
1018 # "+M": tag is moved (new value),
1018 # "+M": tag is moved (new value),
1019 tracktags = lambda x: None
1019 tracktags = lambda x: None
1020 # experimental config: experimental.hook-track-tags
1020 # experimental config: experimental.hook-track-tags
1021 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
1021 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
1022 False)
1022 False)
1023 if desc != 'strip' and shouldtracktags:
1023 if desc != 'strip' and shouldtracktags:
1024 oldheads = self.changelog.headrevs()
1024 oldheads = self.changelog.headrevs()
1025 def tracktags(tr2):
1025 def tracktags(tr2):
1026 repo = reporef()
1026 repo = reporef()
1027 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1027 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1028 newheads = repo.changelog.headrevs()
1028 newheads = repo.changelog.headrevs()
1029 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1029 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1030 # note: we compare lists here.
1030 # note: we compare lists here.
1031 # As we do it only once, building a set would not be cheaper
1031 # As we do it only once, building a set would not be cheaper
1032 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1032 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1033 if changes:
1033 if changes:
1034 tr2.hookargs['tag_moved'] = '1'
1034 tr2.hookargs['tag_moved'] = '1'
1035 with repo.vfs('changes/tags.changes', 'w',
1035 with repo.vfs('changes/tags.changes', 'w',
1036 atomictemp=True) as changesfile:
1036 atomictemp=True) as changesfile:
1037 # note: we do not register the file with the transaction
1037 # note: we do not register the file with the transaction
1038 # because we need it to still exist when the transaction
1038 # because we need it to still exist when the transaction
1039 # is closed (for txnclose hooks)
1039 # is closed (for txnclose hooks)
1040 tagsmod.writediff(changesfile, changes)
1040 tagsmod.writediff(changesfile, changes)
1041 def validate(tr2):
1041 def validate(tr2):
1042 """will run pre-closing hooks"""
1042 """will run pre-closing hooks"""
1043 # XXX the transaction API is a bit lacking here so we take a hacky
1043 # XXX the transaction API is a bit lacking here so we take a hacky
1044 # path for now
1044 # path for now
1045 #
1045 #
1046 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1046 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1047 # dict is copied before these run. In addition we need the data
1047 # dict is copied before these run. In addition we need the data
1048 # available to in-memory hooks too.
1048 # available to in-memory hooks too.
1049 #
1049 #
1050 # Moreover, we also need to make sure this runs before txnclose
1050 # Moreover, we also need to make sure this runs before txnclose
1051 # hooks and there is no "pending" mechanism that would execute
1051 # hooks and there is no "pending" mechanism that would execute
1052 # logic only if hooks are about to run.
1052 # logic only if hooks are about to run.
1053 #
1053 #
1054 # Fixing this limitation of the transaction is also needed to track
1054 # Fixing this limitation of the transaction is also needed to track
1055 # other families of changes (bookmarks, phases, obsolescence).
1055 # other families of changes (bookmarks, phases, obsolescence).
1056 #
1056 #
1057 # This will have to be fixed before we remove the experimental
1057 # This will have to be fixed before we remove the experimental
1058 # gating.
1058 # gating.
1059 tracktags(tr2)
1059 tracktags(tr2)
1060 reporef().hook('pretxnclose', throw=True,
1060 reporef().hook('pretxnclose', throw=True,
1061 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1061 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1062 def releasefn(tr, success):
1062 def releasefn(tr, success):
1063 repo = reporef()
1063 repo = reporef()
1064 if success:
1064 if success:
1065 # this should be explicitly invoked here, because
1065 # this should be explicitly invoked here, because
1066 # in-memory changes aren't written out when the
1066 # in-memory changes aren't written out when the
1067 # transaction closes if tr.addfilegenerator (via
1067 # transaction closes if tr.addfilegenerator (via
1068 # dirstate.write or so) isn't invoked while the
1068 # dirstate.write or so) isn't invoked while the
1069 # transaction is running
1069 # transaction is running
1070 repo.dirstate.write(None)
1070 repo.dirstate.write(None)
1071 else:
1071 else:
1072 # discard all changes (including ones already written
1072 # discard all changes (including ones already written
1073 # out) in this transaction
1073 # out) in this transaction
1074 repo.dirstate.restorebackup(None, prefix='journal.')
1074 repo.dirstate.restorebackup(None, prefix='journal.')
1075
1075
1076 repo.invalidate(clearfilecache=True)
1076 repo.invalidate(clearfilecache=True)
1077
1077
1078 tr = transaction.transaction(rp, self.svfs, vfsmap,
1078 tr = transaction.transaction(rp, self.svfs, vfsmap,
1079 "journal",
1079 "journal",
1080 "undo",
1080 "undo",
1081 aftertrans(renames),
1081 aftertrans(renames),
1082 self.store.createmode,
1082 self.store.createmode,
1083 validator=validate,
1083 validator=validate,
1084 releasefn=releasefn)
1084 releasefn=releasefn)
1085 tr.changes['revs'] = set()
1085 tr.changes['revs'] = set()
1086 tr.changes['obsmarkers'] = set()
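# tr.changes describes what the transaction introduces: 'revs' collects the
# new revision numbers (see updatecaches below) and 'obsmarkers' now collects
# the obsolescence markers created, so later code such as cache updaters can
# inspect them once the transaction closes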
1086
1087
1087 tr.hookargs['txnid'] = txnid
1088 tr.hookargs['txnid'] = txnid
1088 # note: writing the fncache only during finalize means that the file is
1089 # note: writing the fncache only during finalize means that the file is
1089 # outdated when running hooks. As fncache is used for streaming clone,
1090 # outdated when running hooks. As fncache is used for streaming clone,
1090 # this is not expected to break anything that happens during the hooks.
1091 # this is not expected to break anything that happens during the hooks.
1091 tr.addfinalize('flush-fncache', self.store.write)
1092 tr.addfinalize('flush-fncache', self.store.write)
1092 def txnclosehook(tr2):
1093 def txnclosehook(tr2):
1093 """To be run if transaction is successful, will schedule a hook run
1094 """To be run if transaction is successful, will schedule a hook run
1094 """
1095 """
1095 # Don't reference tr2 in hook() so we don't hold a reference.
1096 # Don't reference tr2 in hook() so we don't hold a reference.
1096 # This reduces memory consumption when there are multiple
1097 # This reduces memory consumption when there are multiple
1097 # transactions per lock. This can likely go away if issue5045
1098 # transactions per lock. This can likely go away if issue5045
1098 # fixes the function accumulation.
1099 # fixes the function accumulation.
1099 hookargs = tr2.hookargs
1100 hookargs = tr2.hookargs
1100
1101
1101 def hook():
1102 def hook():
1102 reporef().hook('txnclose', throw=False, txnname=desc,
1103 reporef().hook('txnclose', throw=False, txnname=desc,
1103 **pycompat.strkwargs(hookargs))
1104 **pycompat.strkwargs(hookargs))
1104 reporef()._afterlock(hook)
1105 reporef()._afterlock(hook)
1105 tr.addfinalize('txnclose-hook', txnclosehook)
1106 tr.addfinalize('txnclose-hook', txnclosehook)
1106 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1107 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1107 def txnaborthook(tr2):
1108 def txnaborthook(tr2):
1108 """To be run if transaction is aborted
1109 """To be run if transaction is aborted
1109 """
1110 """
1110 reporef().hook('txnabort', throw=False, txnname=desc,
1111 reporef().hook('txnabort', throw=False, txnname=desc,
1111 **tr2.hookargs)
1112 **tr2.hookargs)
1112 tr.addabort('txnabort-hook', txnaborthook)
1113 tr.addabort('txnabort-hook', txnaborthook)
1113 # avoid eager cache invalidation. in-memory data should be identical
1114 # avoid eager cache invalidation. in-memory data should be identical
1114 # to stored data if transaction has no error.
1115 # to stored data if transaction has no error.
1115 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1116 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1116 self._transref = weakref.ref(tr)
1117 self._transref = weakref.ref(tr)
1117 return tr
1118 return tr
1118
1119
1119 def _journalfiles(self):
1120 def _journalfiles(self):
1120 return ((self.svfs, 'journal'),
1121 return ((self.svfs, 'journal'),
1121 (self.vfs, 'journal.dirstate'),
1122 (self.vfs, 'journal.dirstate'),
1122 (self.vfs, 'journal.branch'),
1123 (self.vfs, 'journal.branch'),
1123 (self.vfs, 'journal.desc'),
1124 (self.vfs, 'journal.desc'),
1124 (self.vfs, 'journal.bookmarks'),
1125 (self.vfs, 'journal.bookmarks'),
1125 (self.svfs, 'journal.phaseroots'))
1126 (self.svfs, 'journal.phaseroots'))
1126
1127
1127 def undofiles(self):
1128 def undofiles(self):
1128 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1129 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1129
1130
1130 @unfilteredmethod
1131 @unfilteredmethod
1131 def _writejournal(self, desc):
1132 def _writejournal(self, desc):
1132 self.dirstate.savebackup(None, prefix='journal.')
1133 self.dirstate.savebackup(None, prefix='journal.')
1133 self.vfs.write("journal.branch",
1134 self.vfs.write("journal.branch",
1134 encoding.fromlocal(self.dirstate.branch()))
1135 encoding.fromlocal(self.dirstate.branch()))
1135 self.vfs.write("journal.desc",
1136 self.vfs.write("journal.desc",
1136 "%d\n%s\n" % (len(self), desc))
1137 "%d\n%s\n" % (len(self), desc))
1137 self.vfs.write("journal.bookmarks",
1138 self.vfs.write("journal.bookmarks",
1138 self.vfs.tryread("bookmarks"))
1139 self.vfs.tryread("bookmarks"))
1139 self.svfs.write("journal.phaseroots",
1140 self.svfs.write("journal.phaseroots",
1140 self.svfs.tryread("phaseroots"))
1141 self.svfs.tryread("phaseroots"))
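# the journal.* files written above snapshot the pre-transaction state;
# aftertrans() renames them to undo.* on success (see _journalfiles and
# undofiles) so that 'hg rollback' can restore them later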
1141
1142
1142 def recover(self):
1143 def recover(self):
1143 with self.lock():
1144 with self.lock():
1144 if self.svfs.exists("journal"):
1145 if self.svfs.exists("journal"):
1145 self.ui.status(_("rolling back interrupted transaction\n"))
1146 self.ui.status(_("rolling back interrupted transaction\n"))
1146 vfsmap = {'': self.svfs,
1147 vfsmap = {'': self.svfs,
1147 'plain': self.vfs,}
1148 'plain': self.vfs,}
1148 transaction.rollback(self.svfs, vfsmap, "journal",
1149 transaction.rollback(self.svfs, vfsmap, "journal",
1149 self.ui.warn)
1150 self.ui.warn)
1150 self.invalidate()
1151 self.invalidate()
1151 return True
1152 return True
1152 else:
1153 else:
1153 self.ui.warn(_("no interrupted transaction available\n"))
1154 self.ui.warn(_("no interrupted transaction available\n"))
1154 return False
1155 return False
1155
1156
1156 def rollback(self, dryrun=False, force=False):
1157 def rollback(self, dryrun=False, force=False):
1157 wlock = lock = dsguard = None
1158 wlock = lock = dsguard = None
1158 try:
1159 try:
1159 wlock = self.wlock()
1160 wlock = self.wlock()
1160 lock = self.lock()
1161 lock = self.lock()
1161 if self.svfs.exists("undo"):
1162 if self.svfs.exists("undo"):
1162 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1163 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1163
1164
1164 return self._rollback(dryrun, force, dsguard)
1165 return self._rollback(dryrun, force, dsguard)
1165 else:
1166 else:
1166 self.ui.warn(_("no rollback information available\n"))
1167 self.ui.warn(_("no rollback information available\n"))
1167 return 1
1168 return 1
1168 finally:
1169 finally:
1169 release(dsguard, lock, wlock)
1170 release(dsguard, lock, wlock)
1170
1171
1171 @unfilteredmethod # Until we get smarter cache management
1172 @unfilteredmethod # Until we get smarter cache management
1172 def _rollback(self, dryrun, force, dsguard):
1173 def _rollback(self, dryrun, force, dsguard):
1173 ui = self.ui
1174 ui = self.ui
1174 try:
1175 try:
1175 args = self.vfs.read('undo.desc').splitlines()
1176 args = self.vfs.read('undo.desc').splitlines()
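# undo.desc is written by _writejournal as "%d\n%s\n" (old changelog length,
# then the transaction description); some callers append a third line with
# extra detail, which is picked up below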
1176 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1177 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1177 if len(args) >= 3:
1178 if len(args) >= 3:
1178 detail = args[2]
1179 detail = args[2]
1179 oldtip = oldlen - 1
1180 oldtip = oldlen - 1
1180
1181
1181 if detail and ui.verbose:
1182 if detail and ui.verbose:
1182 msg = (_('repository tip rolled back to revision %d'
1183 msg = (_('repository tip rolled back to revision %d'
1183 ' (undo %s: %s)\n')
1184 ' (undo %s: %s)\n')
1184 % (oldtip, desc, detail))
1185 % (oldtip, desc, detail))
1185 else:
1186 else:
1186 msg = (_('repository tip rolled back to revision %d'
1187 msg = (_('repository tip rolled back to revision %d'
1187 ' (undo %s)\n')
1188 ' (undo %s)\n')
1188 % (oldtip, desc))
1189 % (oldtip, desc))
1189 except IOError:
1190 except IOError:
1190 msg = _('rolling back unknown transaction\n')
1191 msg = _('rolling back unknown transaction\n')
1191 desc = None
1192 desc = None
1192
1193
1193 if not force and self['.'] != self['tip'] and desc == 'commit':
1194 if not force and self['.'] != self['tip'] and desc == 'commit':
1194 raise error.Abort(
1195 raise error.Abort(
1195 _('rollback of last commit while not checked out '
1196 _('rollback of last commit while not checked out '
1196 'may lose data'), hint=_('use -f to force'))
1197 'may lose data'), hint=_('use -f to force'))
1197
1198
1198 ui.status(msg)
1199 ui.status(msg)
1199 if dryrun:
1200 if dryrun:
1200 return 0
1201 return 0
1201
1202
1202 parents = self.dirstate.parents()
1203 parents = self.dirstate.parents()
1203 self.destroying()
1204 self.destroying()
1204 vfsmap = {'plain': self.vfs, '': self.svfs}
1205 vfsmap = {'plain': self.vfs, '': self.svfs}
1205 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1206 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1206 if self.vfs.exists('undo.bookmarks'):
1207 if self.vfs.exists('undo.bookmarks'):
1207 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1208 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1208 if self.svfs.exists('undo.phaseroots'):
1209 if self.svfs.exists('undo.phaseroots'):
1209 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1210 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1210 self.invalidate()
1211 self.invalidate()
1211
1212
1212 parentgone = (parents[0] not in self.changelog.nodemap or
1213 parentgone = (parents[0] not in self.changelog.nodemap or
1213 parents[1] not in self.changelog.nodemap)
1214 parents[1] not in self.changelog.nodemap)
1214 if parentgone:
1215 if parentgone:
1215 # prevent dirstateguard from overwriting the already restored one
1216 # prevent dirstateguard from overwriting the already restored one
1216 dsguard.close()
1217 dsguard.close()
1217
1218
1218 self.dirstate.restorebackup(None, prefix='undo.')
1219 self.dirstate.restorebackup(None, prefix='undo.')
1219 try:
1220 try:
1220 branch = self.vfs.read('undo.branch')
1221 branch = self.vfs.read('undo.branch')
1221 self.dirstate.setbranch(encoding.tolocal(branch))
1222 self.dirstate.setbranch(encoding.tolocal(branch))
1222 except IOError:
1223 except IOError:
1223 ui.warn(_('named branch could not be reset: '
1224 ui.warn(_('named branch could not be reset: '
1224 'current branch is still \'%s\'\n')
1225 'current branch is still \'%s\'\n')
1225 % self.dirstate.branch())
1226 % self.dirstate.branch())
1226
1227
1227 parents = tuple([p.rev() for p in self[None].parents()])
1228 parents = tuple([p.rev() for p in self[None].parents()])
1228 if len(parents) > 1:
1229 if len(parents) > 1:
1229 ui.status(_('working directory now based on '
1230 ui.status(_('working directory now based on '
1230 'revisions %d and %d\n') % parents)
1231 'revisions %d and %d\n') % parents)
1231 else:
1232 else:
1232 ui.status(_('working directory now based on '
1233 ui.status(_('working directory now based on '
1233 'revision %d\n') % parents)
1234 'revision %d\n') % parents)
1234 mergemod.mergestate.clean(self, self['.'].node())
1235 mergemod.mergestate.clean(self, self['.'].node())
1235
1236
1236 # TODO: if we know which new heads may result from this rollback, pass
1237 # TODO: if we know which new heads may result from this rollback, pass
1237 # them to destroy(), which will prevent the branchhead cache from being
1238 # them to destroy(), which will prevent the branchhead cache from being
1238 # invalidated.
1239 # invalidated.
1239 self.destroyed()
1240 self.destroyed()
1240 return 0
1241 return 0
1241
1242
1242 def _buildcacheupdater(self, newtransaction):
1243 def _buildcacheupdater(self, newtransaction):
1243 """called during transaction to build the callback updating cache
1244 """called during transaction to build the callback updating cache
1244
1245
1245 Lives on the repository to help extensions that might want to augment
1246 Lives on the repository to help extensions that might want to augment
1246 this logic. For this purpose, the created transaction is passed to the
1247 this logic. For this purpose, the created transaction is passed to the
1247 method.
1248 method.
1248 """
1249 """
1249 # we must avoid cyclic reference between repo and transaction.
1250 # we must avoid cyclic reference between repo and transaction.
1250 reporef = weakref.ref(self)
1251 reporef = weakref.ref(self)
1251 def updater(tr):
1252 def updater(tr):
1252 repo = reporef()
1253 repo = reporef()
1253 repo.updatecaches(tr)
1254 repo.updatecaches(tr)
1254 return updater
1255 return updater
1255
1256
1256 @unfilteredmethod
1257 @unfilteredmethod
1257 def updatecaches(self, tr=None):
1258 def updatecaches(self, tr=None):
1258 """warm appropriate caches
1259 """warm appropriate caches
1259
1260
1260 If this function is called after a transaction has closed, the transaction
1261 If this function is called after a transaction has closed, the transaction
1261 will be available in the 'tr' argument. This can be used to selectively
1262 will be available in the 'tr' argument. This can be used to selectively
1262 update caches relevant to the changes in that transaction.
1263 update caches relevant to the changes in that transaction.
1263 """
1264 """
1264 if tr is not None and tr.hookargs.get('source') == 'strip':
1265 if tr is not None and tr.hookargs.get('source') == 'strip':
1265 # During strip, many caches are invalid but
1266 # During strip, many caches are invalid but
1266 # later call to `destroyed` will refresh them.
1267 # later call to `destroyed` will refresh them.
1267 return
1268 return
1268
1269
1269 if tr is None or tr.changes['revs']:
1270 if tr is None or tr.changes['revs']:
1270 # updating the unfiltered branchmap should refresh all the others,
1271 # updating the unfiltered branchmap should refresh all the others,
1271 self.ui.debug('updating the branch cache\n')
1272 self.ui.debug('updating the branch cache\n')
1272 branchmap.updatecache(self.filtered('served'))
1273 branchmap.updatecache(self.filtered('served'))
1273
1274
1274 def invalidatecaches(self):
1275 def invalidatecaches(self):
1275
1276
1276 if '_tagscache' in vars(self):
1277 if '_tagscache' in vars(self):
1277 # can't use delattr on proxy
1278 # can't use delattr on proxy
1278 del self.__dict__['_tagscache']
1279 del self.__dict__['_tagscache']
1279
1280
1280 self.unfiltered()._branchcaches.clear()
1281 self.unfiltered()._branchcaches.clear()
1281 self.invalidatevolatilesets()
1282 self.invalidatevolatilesets()
1282
1283
1283 def invalidatevolatilesets(self):
1284 def invalidatevolatilesets(self):
1284 self.filteredrevcache.clear()
1285 self.filteredrevcache.clear()
1285 obsolete.clearobscaches(self)
1286 obsolete.clearobscaches(self)
1286
1287
1287 def invalidatedirstate(self):
1288 def invalidatedirstate(self):
1288 '''Invalidates the dirstate, causing the next call to dirstate
1289 '''Invalidates the dirstate, causing the next call to dirstate
1289 to check if it was modified since the last time it was read,
1290 to check if it was modified since the last time it was read,
1290 rereading it if it has.
1291 rereading it if it has.
1291
1292
1292 This differs from dirstate.invalidate() in that it doesn't always
1293 This differs from dirstate.invalidate() in that it doesn't always
1293 reread the dirstate. Use dirstate.invalidate() if you want to
1294 reread the dirstate. Use dirstate.invalidate() if you want to
1294 explicitly read the dirstate again (i.e. restoring it to a previous
1295 explicitly read the dirstate again (i.e. restoring it to a previous
1295 known good state).'''
1296 known good state).'''
1296 if hasunfilteredcache(self, 'dirstate'):
1297 if hasunfilteredcache(self, 'dirstate'):
1297 for k in self.dirstate._filecache:
1298 for k in self.dirstate._filecache:
1298 try:
1299 try:
1299 delattr(self.dirstate, k)
1300 delattr(self.dirstate, k)
1300 except AttributeError:
1301 except AttributeError:
1301 pass
1302 pass
1302 delattr(self.unfiltered(), 'dirstate')
1303 delattr(self.unfiltered(), 'dirstate')
1303
1304
1304 def invalidate(self, clearfilecache=False):
1305 def invalidate(self, clearfilecache=False):
1305 '''Invalidates both store and non-store parts other than dirstate
1306 '''Invalidates both store and non-store parts other than dirstate
1306
1307
1307 If a transaction is running, invalidation of store is omitted,
1308 If a transaction is running, invalidation of store is omitted,
1308 because discarding in-memory changes might cause inconsistency
1309 because discarding in-memory changes might cause inconsistency
1309 (e.g. incomplete fncache causes unintentional failure, but
1310 (e.g. incomplete fncache causes unintentional failure, but
1310 redundant one doesn't).
1311 redundant one doesn't).
1311 '''
1312 '''
1312 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1313 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1313 for k in list(self._filecache.keys()):
1314 for k in list(self._filecache.keys()):
1314 # dirstate is invalidated separately in invalidatedirstate()
1315 # dirstate is invalidated separately in invalidatedirstate()
1315 if k == 'dirstate':
1316 if k == 'dirstate':
1316 continue
1317 continue
1317
1318
1318 if clearfilecache:
1319 if clearfilecache:
1319 del self._filecache[k]
1320 del self._filecache[k]
1320 try:
1321 try:
1321 delattr(unfiltered, k)
1322 delattr(unfiltered, k)
1322 except AttributeError:
1323 except AttributeError:
1323 pass
1324 pass
1324 self.invalidatecaches()
1325 self.invalidatecaches()
1325 if not self.currenttransaction():
1326 if not self.currenttransaction():
1326 # TODO: Changing contents of store outside transaction
1327 # TODO: Changing contents of store outside transaction
1327 # causes inconsistency. We should make in-memory store
1328 # causes inconsistency. We should make in-memory store
1328 # changes detectable, and abort if changed.
1329 # changes detectable, and abort if changed.
1329 self.store.invalidatecaches()
1330 self.store.invalidatecaches()
1330
1331
1331 def invalidateall(self):
1332 def invalidateall(self):
1332 '''Fully invalidates both store and non-store parts, causing the
1333 '''Fully invalidates both store and non-store parts, causing the
1333 subsequent operation to reread any outside changes.'''
1334 subsequent operation to reread any outside changes.'''
1334 # extensions should hook this to invalidate their caches
1335 # extensions should hook this to invalidate their caches
1335 self.invalidate()
1336 self.invalidate()
1336 self.invalidatedirstate()
1337 self.invalidatedirstate()
1337
1338
1338 @unfilteredmethod
1339 @unfilteredmethod
1339 def _refreshfilecachestats(self, tr):
1340 def _refreshfilecachestats(self, tr):
1340 """Reload stats of cached files so that they are flagged as valid"""
1341 """Reload stats of cached files so that they are flagged as valid"""
1341 for k, ce in self._filecache.items():
1342 for k, ce in self._filecache.items():
1342 if k == 'dirstate' or k not in self.__dict__:
1343 if k == 'dirstate' or k not in self.__dict__:
1343 continue
1344 continue
1344 ce.refresh()
1345 ce.refresh()
1345
1346
1346 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1347 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1347 inheritchecker=None, parentenvvar=None):
1348 inheritchecker=None, parentenvvar=None):
1348 parentlock = None
1349 parentlock = None
1349 # the contents of parentenvvar are used by the underlying lock to
1350 # the contents of parentenvvar are used by the underlying lock to
1350 # determine whether it can be inherited
1351 # determine whether it can be inherited
1351 if parentenvvar is not None:
1352 if parentenvvar is not None:
1352 parentlock = encoding.environ.get(parentenvvar)
1353 parentlock = encoding.environ.get(parentenvvar)
1353 try:
1354 try:
1354 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1355 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1355 acquirefn=acquirefn, desc=desc,
1356 acquirefn=acquirefn, desc=desc,
1356 inheritchecker=inheritchecker,
1357 inheritchecker=inheritchecker,
1357 parentlock=parentlock)
1358 parentlock=parentlock)
1358 except error.LockHeld as inst:
1359 except error.LockHeld as inst:
1359 if not wait:
1360 if not wait:
1360 raise
1361 raise
1361 # show more details for new-style locks
1362 # show more details for new-style locks
1362 if ':' in inst.locker:
1363 if ':' in inst.locker:
1363 host, pid = inst.locker.split(":", 1)
1364 host, pid = inst.locker.split(":", 1)
1364 self.ui.warn(
1365 self.ui.warn(
1365 _("waiting for lock on %s held by process %r "
1366 _("waiting for lock on %s held by process %r "
1366 "on host %r\n") % (desc, pid, host))
1367 "on host %r\n") % (desc, pid, host))
1367 else:
1368 else:
1368 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1369 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1369 (desc, inst.locker))
1370 (desc, inst.locker))
1370 # default to 600 seconds timeout
1371 # default to 600 seconds timeout
1371 l = lockmod.lock(vfs, lockname,
1372 l = lockmod.lock(vfs, lockname,
1372 int(self.ui.config("ui", "timeout", "600")),
1373 int(self.ui.config("ui", "timeout", "600")),
1373 releasefn=releasefn, acquirefn=acquirefn,
1374 releasefn=releasefn, acquirefn=acquirefn,
1374 desc=desc)
1375 desc=desc)
1375 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1376 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1376 return l
1377 return l
1377
1378
1378 def _afterlock(self, callback):
1379 def _afterlock(self, callback):
1379 """add a callback to be run when the repository is fully unlocked
1380 """add a callback to be run when the repository is fully unlocked
1380
1381
1381 The callback will be executed when the outermost lock is released
1382 The callback will be executed when the outermost lock is released
1382 (with wlock being higher level than 'lock')."""
1383 (with wlock being higher level than 'lock')."""
1383 for ref in (self._wlockref, self._lockref):
1384 for ref in (self._wlockref, self._lockref):
1384 l = ref and ref()
1385 l = ref and ref()
1385 if l and l.held:
1386 if l and l.held:
1386 l.postrelease.append(callback)
1387 l.postrelease.append(callback)
1387 break
1388 break
1388 else: # no lock has been found.
1389 else: # no lock has been found.
1389 callback()
1390 callback()
1390
1391
1391 def lock(self, wait=True):
1392 def lock(self, wait=True):
1392 '''Lock the repository store (.hg/store) and return a weak reference
1393 '''Lock the repository store (.hg/store) and return a weak reference
1393 to the lock. Use this before modifying the store (e.g. committing or
1394 to the lock. Use this before modifying the store (e.g. committing or
1394 stripping). If you are opening a transaction, get a lock as well.
1395 stripping). If you are opening a transaction, get a lock as well.
1395
1396
1396 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1397 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1397 'wlock' first to avoid a dead-lock hazard.'''
1398 'wlock' first to avoid a dead-lock hazard.'''
1398 l = self._currentlock(self._lockref)
1399 l = self._currentlock(self._lockref)
1399 if l is not None:
1400 if l is not None:
1400 l.lock()
1401 l.lock()
1401 return l
1402 return l
1402
1403
1403 l = self._lock(self.svfs, "lock", wait, None,
1404 l = self._lock(self.svfs, "lock", wait, None,
1404 self.invalidate, _('repository %s') % self.origroot)
1405 self.invalidate, _('repository %s') % self.origroot)
1405 self._lockref = weakref.ref(l)
1406 self._lockref = weakref.ref(l)
1406 return l
1407 return l
1407
1408
1408 def _wlockchecktransaction(self):
1409 def _wlockchecktransaction(self):
1409 if self.currenttransaction() is not None:
1410 if self.currenttransaction() is not None:
1410 raise error.LockInheritanceContractViolation(
1411 raise error.LockInheritanceContractViolation(
1411 'wlock cannot be inherited in the middle of a transaction')
1412 'wlock cannot be inherited in the middle of a transaction')
1412
1413
1413 def wlock(self, wait=True):
1414 def wlock(self, wait=True):
1414 '''Lock the non-store parts of the repository (everything under
1415 '''Lock the non-store parts of the repository (everything under
1415 .hg except .hg/store) and return a weak reference to the lock.
1416 .hg except .hg/store) and return a weak reference to the lock.
1416
1417
1417 Use this before modifying files in .hg.
1418 Use this before modifying files in .hg.
1418
1419
1419 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1420 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1420 'wlock' first to avoid a dead-lock hazard.'''
1421 'wlock' first to avoid a dead-lock hazard.'''
1421 l = self._wlockref and self._wlockref()
1422 l = self._wlockref and self._wlockref()
1422 if l is not None and l.held:
1423 if l is not None and l.held:
1423 l.lock()
1424 l.lock()
1424 return l
1425 return l
1425
1426
1426 # We do not need to check for non-waiting lock acquisition. Such
1427 # We do not need to check for non-waiting lock acquisition. Such
1427 # an acquisition would not cause a dead-lock, as it would just fail.
1428 # an acquisition would not cause a dead-lock, as it would just fail.
1428 if wait and (self.ui.configbool('devel', 'all-warnings')
1429 if wait and (self.ui.configbool('devel', 'all-warnings')
1429 or self.ui.configbool('devel', 'check-locks')):
1430 or self.ui.configbool('devel', 'check-locks')):
1430 if self._currentlock(self._lockref) is not None:
1431 if self._currentlock(self._lockref) is not None:
1431 self.ui.develwarn('"wlock" acquired after "lock"')
1432 self.ui.develwarn('"wlock" acquired after "lock"')
1432
1433
1433 def unlock():
1434 def unlock():
1434 if self.dirstate.pendingparentchange():
1435 if self.dirstate.pendingparentchange():
1435 self.dirstate.invalidate()
1436 self.dirstate.invalidate()
1436 else:
1437 else:
1437 self.dirstate.write(None)
1438 self.dirstate.write(None)
1438
1439
1439 self._filecache['dirstate'].refresh()
1440 self._filecache['dirstate'].refresh()
1440
1441
1441 l = self._lock(self.vfs, "wlock", wait, unlock,
1442 l = self._lock(self.vfs, "wlock", wait, unlock,
1442 self.invalidatedirstate, _('working directory of %s') %
1443 self.invalidatedirstate, _('working directory of %s') %
1443 self.origroot,
1444 self.origroot,
1444 inheritchecker=self._wlockchecktransaction,
1445 inheritchecker=self._wlockchecktransaction,
1445 parentenvvar='HG_WLOCK_LOCKER')
1446 parentenvvar='HG_WLOCK_LOCKER')
1446 self._wlockref = weakref.ref(l)
1447 self._wlockref = weakref.ref(l)
1447 return l
1448 return l
1448
1449
1449 def _currentlock(self, lockref):
1450 def _currentlock(self, lockref):
1450 """Returns the lock if it's held, or None if it's not."""
1451 """Returns the lock if it's held, or None if it's not."""
1451 if lockref is None:
1452 if lockref is None:
1452 return None
1453 return None
1453 l = lockref()
1454 l = lockref()
1454 if l is None or not l.held:
1455 if l is None or not l.held:
1455 return None
1456 return None
1456 return l
1457 return l
1457
1458
1458 def currentwlock(self):
1459 def currentwlock(self):
1459 """Returns the wlock if it's held, or None if it's not."""
1460 """Returns the wlock if it's held, or None if it's not."""
1460 return self._currentlock(self._wlockref)
1461 return self._currentlock(self._wlockref)
1461
1462
1462 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1463 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1463 """
1464 """
1464 commit an individual file as part of a larger transaction
1465 commit an individual file as part of a larger transaction
1465 """
1466 """
1466
1467
1467 fname = fctx.path()
1468 fname = fctx.path()
1468 fparent1 = manifest1.get(fname, nullid)
1469 fparent1 = manifest1.get(fname, nullid)
1469 fparent2 = manifest2.get(fname, nullid)
1470 fparent2 = manifest2.get(fname, nullid)
1470 if isinstance(fctx, context.filectx):
1471 if isinstance(fctx, context.filectx):
1471 node = fctx.filenode()
1472 node = fctx.filenode()
1472 if node in [fparent1, fparent2]:
1473 if node in [fparent1, fparent2]:
1473 self.ui.debug('reusing %s filelog entry\n' % fname)
1474 self.ui.debug('reusing %s filelog entry\n' % fname)
1474 if manifest1.flags(fname) != fctx.flags():
1475 if manifest1.flags(fname) != fctx.flags():
1475 changelist.append(fname)
1476 changelist.append(fname)
1476 return node
1477 return node
1477
1478
1478 flog = self.file(fname)
1479 flog = self.file(fname)
1479 meta = {}
1480 meta = {}
1480 copy = fctx.renamed()
1481 copy = fctx.renamed()
1481 if copy and copy[0] != fname:
1482 if copy and copy[0] != fname:
1482 # Mark the new revision of this file as a copy of another
1483 # Mark the new revision of this file as a copy of another
1483 # file. This copy data will effectively act as a parent
1484 # file. This copy data will effectively act as a parent
1484 # of this new revision. If this is a merge, the first
1485 # of this new revision. If this is a merge, the first
1485 # parent will be the nullid (meaning "look up the copy data")
1486 # parent will be the nullid (meaning "look up the copy data")
1486 # and the second one will be the other parent. For example:
1487 # and the second one will be the other parent. For example:
1487 #
1488 #
1488 # 0 --- 1 --- 3 rev1 changes file foo
1489 # 0 --- 1 --- 3 rev1 changes file foo
1489 # \ / rev2 renames foo to bar and changes it
1490 # \ / rev2 renames foo to bar and changes it
1490 # \- 2 -/ rev3 should have bar with all changes and
1491 # \- 2 -/ rev3 should have bar with all changes and
1491 # should record that bar descends from
1492 # should record that bar descends from
1492 # bar in rev2 and foo in rev1
1493 # bar in rev2 and foo in rev1
1493 #
1494 #
1494 # this allows this merge to succeed:
1495 # this allows this merge to succeed:
1495 #
1496 #
1496 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1497 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1497 # \ / merging rev3 and rev4 should use bar@rev2
1498 # \ / merging rev3 and rev4 should use bar@rev2
1498 # \- 2 --- 4 as the merge base
1499 # \- 2 --- 4 as the merge base
1499 #
1500 #
1500
1501
1501 cfname = copy[0]
1502 cfname = copy[0]
1502 crev = manifest1.get(cfname)
1503 crev = manifest1.get(cfname)
1503 newfparent = fparent2
1504 newfparent = fparent2
1504
1505
1505 if manifest2: # branch merge
1506 if manifest2: # branch merge
1506 if fparent2 == nullid or crev is None: # copied on remote side
1507 if fparent2 == nullid or crev is None: # copied on remote side
1507 if cfname in manifest2:
1508 if cfname in manifest2:
1508 crev = manifest2[cfname]
1509 crev = manifest2[cfname]
1509 newfparent = fparent1
1510 newfparent = fparent1
1510
1511
1511 # Here, we used to search backwards through history to try to find
1512 # Here, we used to search backwards through history to try to find
1512 # where the file copy came from if the source of a copy was not in
1513 # where the file copy came from if the source of a copy was not in
1513 # the parent directory. However, this doesn't actually make sense to
1514 # the parent directory. However, this doesn't actually make sense to
1514 # do (what does a copy from something not in your working copy even
1515 # do (what does a copy from something not in your working copy even
1515 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1516 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1516 # the user that copy information was dropped, so if they didn't
1517 # the user that copy information was dropped, so if they didn't
1517 # expect this outcome it can be fixed, but this is the correct
1518 # expect this outcome it can be fixed, but this is the correct
1518 # behavior in this circumstance.
1519 # behavior in this circumstance.
1519
1520
1520 if crev:
1521 if crev:
1521 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1522 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1522 meta["copy"] = cfname
1523 meta["copy"] = cfname
1523 meta["copyrev"] = hex(crev)
1524 meta["copyrev"] = hex(crev)
1524 fparent1, fparent2 = nullid, newfparent
1525 fparent1, fparent2 = nullid, newfparent
1525 else:
1526 else:
1526 self.ui.warn(_("warning: can't find ancestor for '%s' "
1527 self.ui.warn(_("warning: can't find ancestor for '%s' "
1527 "copied from '%s'!\n") % (fname, cfname))
1528 "copied from '%s'!\n") % (fname, cfname))
1528
1529
1529 elif fparent1 == nullid:
1530 elif fparent1 == nullid:
1530 fparent1, fparent2 = fparent2, nullid
1531 fparent1, fparent2 = fparent2, nullid
1531 elif fparent2 != nullid:
1532 elif fparent2 != nullid:
1532 # is one parent an ancestor of the other?
1533 # is one parent an ancestor of the other?
1533 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1534 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1534 if fparent1 in fparentancestors:
1535 if fparent1 in fparentancestors:
1535 fparent1, fparent2 = fparent2, nullid
1536 fparent1, fparent2 = fparent2, nullid
1536 elif fparent2 in fparentancestors:
1537 elif fparent2 in fparentancestors:
1537 fparent2 = nullid
1538 fparent2 = nullid
1538
1539
1539 # is the file changed?
1540 # is the file changed?
1540 text = fctx.data()
1541 text = fctx.data()
1541 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1542 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1542 changelist.append(fname)
1543 changelist.append(fname)
1543 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1544 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1544 # are just the flags changed during merge?
1545 # are just the flags changed during merge?
1545 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1546 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1546 changelist.append(fname)
1547 changelist.append(fname)
1547
1548
1548 return fparent1
1549 return fparent1
1549
1550
1550 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1551 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1551 """check for commit arguments that aren't committable"""
1552 """check for commit arguments that aren't committable"""
1552 if match.isexact() or match.prefix():
1553 if match.isexact() or match.prefix():
1553 matched = set(status.modified + status.added + status.removed)
1554 matched = set(status.modified + status.added + status.removed)
1554
1555
1555 for f in match.files():
1556 for f in match.files():
1556 f = self.dirstate.normalize(f)
1557 f = self.dirstate.normalize(f)
1557 if f == '.' or f in matched or f in wctx.substate:
1558 if f == '.' or f in matched or f in wctx.substate:
1558 continue
1559 continue
1559 if f in status.deleted:
1560 if f in status.deleted:
1560 fail(f, _('file not found!'))
1561 fail(f, _('file not found!'))
1561 if f in vdirs: # visited directory
1562 if f in vdirs: # visited directory
1562 d = f + '/'
1563 d = f + '/'
1563 for mf in matched:
1564 for mf in matched:
1564 if mf.startswith(d):
1565 if mf.startswith(d):
1565 break
1566 break
1566 else:
1567 else:
1567 fail(f, _("no match under directory!"))
1568 fail(f, _("no match under directory!"))
1568 elif f not in self.dirstate:
1569 elif f not in self.dirstate:
1569 fail(f, _("file not tracked!"))
1570 fail(f, _("file not tracked!"))
1570
1571
1571 @unfilteredmethod
1572 @unfilteredmethod
1572 def commit(self, text="", user=None, date=None, match=None, force=False,
1573 def commit(self, text="", user=None, date=None, match=None, force=False,
1573 editor=False, extra=None):
1574 editor=False, extra=None):
1574 """Add a new revision to current repository.
1575 """Add a new revision to current repository.
1575
1576
1576 Revision information is gathered from the working directory,
1577 Revision information is gathered from the working directory,
1577 match can be used to filter the committed files. If editor is
1578 match can be used to filter the committed files. If editor is
1578 supplied, it is called to get a commit message.
1579 supplied, it is called to get a commit message.
1579 """
1580 """
1580 if extra is None:
1581 if extra is None:
1581 extra = {}
1582 extra = {}
1582
1583
1583 def fail(f, msg):
1584 def fail(f, msg):
1584 raise error.Abort('%s: %s' % (f, msg))
1585 raise error.Abort('%s: %s' % (f, msg))
1585
1586
1586 if not match:
1587 if not match:
1587 match = matchmod.always(self.root, '')
1588 match = matchmod.always(self.root, '')
1588
1589
1589 if not force:
1590 if not force:
1590 vdirs = []
1591 vdirs = []
1591 match.explicitdir = vdirs.append
1592 match.explicitdir = vdirs.append
1592 match.bad = fail
1593 match.bad = fail
1593
1594
1594 wlock = lock = tr = None
1595 wlock = lock = tr = None
1595 try:
1596 try:
1596 wlock = self.wlock()
1597 wlock = self.wlock()
1597 lock = self.lock() # for recent changelog (see issue4368)
1598 lock = self.lock() # for recent changelog (see issue4368)
1598
1599
1599 wctx = self[None]
1600 wctx = self[None]
1600 merge = len(wctx.parents()) > 1
1601 merge = len(wctx.parents()) > 1
1601
1602
1602 if not force and merge and not match.always():
1603 if not force and merge and not match.always():
1603 raise error.Abort(_('cannot partially commit a merge '
1604 raise error.Abort(_('cannot partially commit a merge '
1604 '(do not specify files or patterns)'))
1605 '(do not specify files or patterns)'))
1605
1606
1606 status = self.status(match=match, clean=force)
1607 status = self.status(match=match, clean=force)
1607 if force:
1608 if force:
1608 status.modified.extend(status.clean) # mq may commit clean files
1609 status.modified.extend(status.clean) # mq may commit clean files
1609
1610
1610 # check subrepos
1611 # check subrepos
1611 subs = []
1612 subs = []
1612 commitsubs = set()
1613 commitsubs = set()
1613 newstate = wctx.substate.copy()
1614 newstate = wctx.substate.copy()
1614 # only manage subrepos and .hgsubstate if .hgsub is present
1615 # only manage subrepos and .hgsubstate if .hgsub is present
1615 if '.hgsub' in wctx:
1616 if '.hgsub' in wctx:
1616 # we'll decide whether to track this ourselves, thanks
1617 # we'll decide whether to track this ourselves, thanks
1617 for c in status.modified, status.added, status.removed:
1618 for c in status.modified, status.added, status.removed:
1618 if '.hgsubstate' in c:
1619 if '.hgsubstate' in c:
1619 c.remove('.hgsubstate')
1620 c.remove('.hgsubstate')
1620
1621
1621 # compare current state to last committed state
1622 # compare current state to last committed state
1622 # build new substate based on last committed state
1623 # build new substate based on last committed state
1623 oldstate = wctx.p1().substate
1624 oldstate = wctx.p1().substate
1624 for s in sorted(newstate.keys()):
1625 for s in sorted(newstate.keys()):
1625 if not match(s):
1626 if not match(s):
1626 # ignore working copy, use old state if present
1627 # ignore working copy, use old state if present
1627 if s in oldstate:
1628 if s in oldstate:
1628 newstate[s] = oldstate[s]
1629 newstate[s] = oldstate[s]
1629 continue
1630 continue
1630 if not force:
1631 if not force:
1631 raise error.Abort(
1632 raise error.Abort(
1632 _("commit with new subrepo %s excluded") % s)
1633 _("commit with new subrepo %s excluded") % s)
1633 dirtyreason = wctx.sub(s).dirtyreason(True)
1634 dirtyreason = wctx.sub(s).dirtyreason(True)
1634 if dirtyreason:
1635 if dirtyreason:
1635 if not self.ui.configbool('ui', 'commitsubrepos'):
1636 if not self.ui.configbool('ui', 'commitsubrepos'):
1636 raise error.Abort(dirtyreason,
1637 raise error.Abort(dirtyreason,
1637 hint=_("use --subrepos for recursive commit"))
1638 hint=_("use --subrepos for recursive commit"))
1638 subs.append(s)
1639 subs.append(s)
1639 commitsubs.add(s)
1640 commitsubs.add(s)
1640 else:
1641 else:
1641 bs = wctx.sub(s).basestate()
1642 bs = wctx.sub(s).basestate()
1642 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1643 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1643 if oldstate.get(s, (None, None, None))[1] != bs:
1644 if oldstate.get(s, (None, None, None))[1] != bs:
1644 subs.append(s)
1645 subs.append(s)
1645
1646
1646 # check for removed subrepos
1647 # check for removed subrepos
1647 for p in wctx.parents():
1648 for p in wctx.parents():
1648 r = [s for s in p.substate if s not in newstate]
1649 r = [s for s in p.substate if s not in newstate]
1649 subs += [s for s in r if match(s)]
1650 subs += [s for s in r if match(s)]
1650 if subs:
1651 if subs:
1651 if (not match('.hgsub') and
1652 if (not match('.hgsub') and
1652 '.hgsub' in (wctx.modified() + wctx.added())):
1653 '.hgsub' in (wctx.modified() + wctx.added())):
1653 raise error.Abort(
1654 raise error.Abort(
1654 _("can't commit subrepos without .hgsub"))
1655 _("can't commit subrepos without .hgsub"))
1655 status.modified.insert(0, '.hgsubstate')
1656 status.modified.insert(0, '.hgsubstate')
1656
1657
1657 elif '.hgsub' in status.removed:
1658 elif '.hgsub' in status.removed:
1658 # clean up .hgsubstate when .hgsub is removed
1659 # clean up .hgsubstate when .hgsub is removed
1659 if ('.hgsubstate' in wctx and
1660 if ('.hgsubstate' in wctx and
1660 '.hgsubstate' not in (status.modified + status.added +
1661 '.hgsubstate' not in (status.modified + status.added +
1661 status.removed)):
1662 status.removed)):
1662 status.removed.insert(0, '.hgsubstate')
1663 status.removed.insert(0, '.hgsubstate')
1663
1664
1664 # make sure all explicit patterns are matched
1665 # make sure all explicit patterns are matched
1665 if not force:
1666 if not force:
1666 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1667 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1667
1668
1668 cctx = context.workingcommitctx(self, status,
1669 cctx = context.workingcommitctx(self, status,
1669 text, user, date, extra)
1670 text, user, date, extra)
1670
1671
1671 # internal config: ui.allowemptycommit
1672 # internal config: ui.allowemptycommit
1672 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1673 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1673 or extra.get('close') or merge or cctx.files()
1674 or extra.get('close') or merge or cctx.files()
1674 or self.ui.configbool('ui', 'allowemptycommit'))
1675 or self.ui.configbool('ui', 'allowemptycommit'))
1675 if not allowemptycommit:
1676 if not allowemptycommit:
1676 return None
1677 return None
1677
1678
1678 if merge and cctx.deleted():
1679 if merge and cctx.deleted():
1679 raise error.Abort(_("cannot commit merge with missing files"))
1680 raise error.Abort(_("cannot commit merge with missing files"))
1680
1681
1681 ms = mergemod.mergestate.read(self)
1682 ms = mergemod.mergestate.read(self)
1682 mergeutil.checkunresolved(ms)
1683 mergeutil.checkunresolved(ms)
1683
1684
1684 if editor:
1685 if editor:
1685 cctx._text = editor(self, cctx, subs)
1686 cctx._text = editor(self, cctx, subs)
1686 edited = (text != cctx._text)
1687 edited = (text != cctx._text)
1687
1688
1688 # Save commit message in case this transaction gets rolled back
1689 # Save commit message in case this transaction gets rolled back
1689 # (e.g. by a pretxncommit hook). Leave the content alone on
1690 # (e.g. by a pretxncommit hook). Leave the content alone on
1690 # the assumption that the user will use the same editor again.
1691 # the assumption that the user will use the same editor again.
1691 msgfn = self.savecommitmessage(cctx._text)
1692 msgfn = self.savecommitmessage(cctx._text)
1692
1693
1693 # commit subs and write new state
1694 # commit subs and write new state
1694 if subs:
1695 if subs:
1695 for s in sorted(commitsubs):
1696 for s in sorted(commitsubs):
1696 sub = wctx.sub(s)
1697 sub = wctx.sub(s)
1697 self.ui.status(_('committing subrepository %s\n') %
1698 self.ui.status(_('committing subrepository %s\n') %
1698 subrepo.subrelpath(sub))
1699 subrepo.subrelpath(sub))
1699 sr = sub.commit(cctx._text, user, date)
1700 sr = sub.commit(cctx._text, user, date)
1700 newstate[s] = (newstate[s][0], sr)
1701 newstate[s] = (newstate[s][0], sr)
1701 subrepo.writestate(self, newstate)
1702 subrepo.writestate(self, newstate)
1702
1703
1703 p1, p2 = self.dirstate.parents()
1704 p1, p2 = self.dirstate.parents()
1704 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1705 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1705 try:
1706 try:
1706 self.hook("precommit", throw=True, parent1=hookp1,
1707 self.hook("precommit", throw=True, parent1=hookp1,
1707 parent2=hookp2)
1708 parent2=hookp2)
1708 tr = self.transaction('commit')
1709 tr = self.transaction('commit')
1709 ret = self.commitctx(cctx, True)
1710 ret = self.commitctx(cctx, True)
1710 except: # re-raises
1711 except: # re-raises
1711 if edited:
1712 if edited:
1712 self.ui.write(
1713 self.ui.write(
1713 _('note: commit message saved in %s\n') % msgfn)
1714 _('note: commit message saved in %s\n') % msgfn)
1714 raise
1715 raise
1715 # update bookmarks, dirstate and mergestate
1716 # update bookmarks, dirstate and mergestate
1716 bookmarks.update(self, [p1, p2], ret)
1717 bookmarks.update(self, [p1, p2], ret)
1717 cctx.markcommitted(ret)
1718 cctx.markcommitted(ret)
1718 ms.reset()
1719 ms.reset()
1719 tr.close()
1720 tr.close()
1720
1721
1721 finally:
1722 finally:
1722 lockmod.release(tr, lock, wlock)
1723 lockmod.release(tr, lock, wlock)
1723
1724
1724 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1725 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1725 # hack for commands that use a temporary commit (e.g. histedit):
1726 # hack for commands that use a temporary commit (e.g. histedit):
1726 # the temporary commit may already have been stripped before the hook runs
1727 # the temporary commit may already have been stripped before the hook runs
1727 if self.changelog.hasnode(ret):
1728 if self.changelog.hasnode(ret):
1728 self.hook("commit", node=node, parent1=parent1,
1729 self.hook("commit", node=node, parent1=parent1,
1729 parent2=parent2)
1730 parent2=parent2)
1730 self._afterlock(commithook)
1731 self._afterlock(commithook)
1731 return ret
1732 return ret
1732
1733
1733 @unfilteredmethod
1734 @unfilteredmethod
1734 def commitctx(self, ctx, error=False):
1735 def commitctx(self, ctx, error=False):
1735 """Add a new revision to current repository.
1736 """Add a new revision to current repository.
1736 Revision information is passed via the context argument.
1737 Revision information is passed via the context argument.
1737 """
1738 """
1738
1739
1739 tr = None
1740 tr = None
1740 p1, p2 = ctx.p1(), ctx.p2()
1741 p1, p2 = ctx.p1(), ctx.p2()
1741 user = ctx.user()
1742 user = ctx.user()
1742
1743
1743 lock = self.lock()
1744 lock = self.lock()
1744 try:
1745 try:
1745 tr = self.transaction("commit")
1746 tr = self.transaction("commit")
1746 trp = weakref.proxy(tr)
1747 trp = weakref.proxy(tr)
1747
1748
1748 if ctx.manifestnode():
1749 if ctx.manifestnode():
1749 # reuse an existing manifest revision
1750 # reuse an existing manifest revision
1750 mn = ctx.manifestnode()
1751 mn = ctx.manifestnode()
1751 files = ctx.files()
1752 files = ctx.files()
1752 elif ctx.files():
1753 elif ctx.files():
1753 m1ctx = p1.manifestctx()
1754 m1ctx = p1.manifestctx()
1754 m2ctx = p2.manifestctx()
1755 m2ctx = p2.manifestctx()
1755 mctx = m1ctx.copy()
1756 mctx = m1ctx.copy()
1756
1757
1757 m = mctx.read()
1758 m = mctx.read()
1758 m1 = m1ctx.read()
1759 m1 = m1ctx.read()
1759 m2 = m2ctx.read()
1760 m2 = m2ctx.read()
1760
1761
1761 # check in files
1762 # check in files
1762 added = []
1763 added = []
1763 changed = []
1764 changed = []
1764 removed = list(ctx.removed())
1765 removed = list(ctx.removed())
1765 linkrev = len(self)
1766 linkrev = len(self)
1766 self.ui.note(_("committing files:\n"))
1767 self.ui.note(_("committing files:\n"))
1767 for f in sorted(ctx.modified() + ctx.added()):
1768 for f in sorted(ctx.modified() + ctx.added()):
1768 self.ui.note(f + "\n")
1769 self.ui.note(f + "\n")
1769 try:
1770 try:
1770 fctx = ctx[f]
1771 fctx = ctx[f]
1771 if fctx is None:
1772 if fctx is None:
1772 removed.append(f)
1773 removed.append(f)
1773 else:
1774 else:
1774 added.append(f)
1775 added.append(f)
1775 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1776 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1776 trp, changed)
1777 trp, changed)
1777 m.setflag(f, fctx.flags())
1778 m.setflag(f, fctx.flags())
1778 except OSError as inst:
1779 except OSError as inst:
1779 self.ui.warn(_("trouble committing %s!\n") % f)
1780 self.ui.warn(_("trouble committing %s!\n") % f)
1780 raise
1781 raise
1781 except IOError as inst:
1782 except IOError as inst:
1782 errcode = getattr(inst, 'errno', errno.ENOENT)
1783 errcode = getattr(inst, 'errno', errno.ENOENT)
1783 if error or errcode and errcode != errno.ENOENT:
1784 if error or errcode and errcode != errno.ENOENT:
1784 self.ui.warn(_("trouble committing %s!\n") % f)
1785 self.ui.warn(_("trouble committing %s!\n") % f)
1785 raise
1786 raise
1786
1787
1787 # update manifest
1788 # update manifest
1788 self.ui.note(_("committing manifest\n"))
1789 self.ui.note(_("committing manifest\n"))
1789 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1790 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1790 drop = [f for f in removed if f in m]
1791 drop = [f for f in removed if f in m]
1791 for f in drop:
1792 for f in drop:
1792 del m[f]
1793 del m[f]
1793 mn = mctx.write(trp, linkrev,
1794 mn = mctx.write(trp, linkrev,
1794 p1.manifestnode(), p2.manifestnode(),
1795 p1.manifestnode(), p2.manifestnode(),
1795 added, drop)
1796 added, drop)
1796 files = changed + removed
1797 files = changed + removed
1797 else:
1798 else:
1798 mn = p1.manifestnode()
1799 mn = p1.manifestnode()
1799 files = []
1800 files = []
1800
1801
1801 # update changelog
1802 # update changelog
1802 self.ui.note(_("committing changelog\n"))
1803 self.ui.note(_("committing changelog\n"))
1803 self.changelog.delayupdate(tr)
1804 self.changelog.delayupdate(tr)
1804 n = self.changelog.add(mn, files, ctx.description(),
1805 n = self.changelog.add(mn, files, ctx.description(),
1805 trp, p1.node(), p2.node(),
1806 trp, p1.node(), p2.node(),
1806 user, ctx.date(), ctx.extra().copy())
1807 user, ctx.date(), ctx.extra().copy())
1807 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1808 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1808 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1809 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1809 parent2=xp2)
1810 parent2=xp2)
1810 # set the new commit in its proper phase
1811 # set the new commit in its proper phase
1811 targetphase = subrepo.newcommitphase(self.ui, ctx)
1812 targetphase = subrepo.newcommitphase(self.ui, ctx)
1812 if targetphase:
1813 if targetphase:
1813 # retracting the boundary does not alter parent changesets.
1814 # retracting the boundary does not alter parent changesets.
1814 # if a parent has a higher phase, the resulting phase will
1815 # if a parent has a higher phase, the resulting phase will
1815 # be compliant anyway
1816 # be compliant anyway
1816 #
1817 #
1817 # if minimal phase was 0 we don't need to retract anything
1818 # if minimal phase was 0 we don't need to retract anything
1818 phases.retractboundary(self, tr, targetphase, [n])
1819 phases.retractboundary(self, tr, targetphase, [n])
1819 tr.close()
1820 tr.close()
1820 return n
1821 return n
1821 finally:
1822 finally:
1822 if tr:
1823 if tr:
1823 tr.release()
1824 tr.release()
1824 lock.release()
1825 lock.release()
1825
1826
1826 @unfilteredmethod
1827 @unfilteredmethod
1827 def destroying(self):
1828 def destroying(self):
1828 '''Inform the repository that nodes are about to be destroyed.
1829 '''Inform the repository that nodes are about to be destroyed.
1829 Intended for use by strip and rollback, so there's a common
1830 Intended for use by strip and rollback, so there's a common
1830 place for anything that has to be done before destroying history.
1831 place for anything that has to be done before destroying history.
1831
1832
1832 This is mostly useful for saving state that is in memory and waiting
1833 This is mostly useful for saving state that is in memory and waiting
1833 to be flushed when the current lock is released. Because a call to
1834 to be flushed when the current lock is released. Because a call to
1834 destroyed is imminent, the repo will be invalidated causing those
1835 destroyed is imminent, the repo will be invalidated causing those
1835 changes to stay in memory (waiting for the next unlock), or vanish
1836 changes to stay in memory (waiting for the next unlock), or vanish
1836 completely.
1837 completely.
1837 '''
1838 '''
1838 # When using the same lock to commit and strip, the phasecache is left
1839 # When using the same lock to commit and strip, the phasecache is left
1839 # dirty after committing. Then when we strip, the repo is invalidated,
1840 # dirty after committing. Then when we strip, the repo is invalidated,
1840 # causing those changes to disappear.
1841 # causing those changes to disappear.
1841 if '_phasecache' in vars(self):
1842 if '_phasecache' in vars(self):
1842 self._phasecache.write()
1843 self._phasecache.write()
1843
1844
1844 @unfilteredmethod
1845 @unfilteredmethod
1845 def destroyed(self):
1846 def destroyed(self):
1846 '''Inform the repository that nodes have been destroyed.
1847 '''Inform the repository that nodes have been destroyed.
1847 Intended for use by strip and rollback, so there's a common
1848 Intended for use by strip and rollback, so there's a common
1848 place for anything that has to be done after destroying history.
1849 place for anything that has to be done after destroying history.
1849 '''
1850 '''
1850 # When one tries to:
1851 # When one tries to:
1851 # 1) destroy nodes thus calling this method (e.g. strip)
1852 # 1) destroy nodes thus calling this method (e.g. strip)
1852 # 2) use phasecache somewhere (e.g. commit)
1853 # 2) use phasecache somewhere (e.g. commit)
1853 #
1854 #
1854 # then 2) will fail because the phasecache contains nodes that were
1855 # then 2) will fail because the phasecache contains nodes that were
1855 # removed. We can either remove phasecache from the filecache,
1856 # removed. We can either remove phasecache from the filecache,
1856 # causing it to reload next time it is accessed, or simply filter
1857 # causing it to reload next time it is accessed, or simply filter
1857 # the removed nodes now and write the updated cache.
1858 # the removed nodes now and write the updated cache.
1858 self._phasecache.filterunknown(self)
1859 self._phasecache.filterunknown(self)
1859 self._phasecache.write()
1860 self._phasecache.write()
1860
1861
1861 # refresh all repository caches
1862 # refresh all repository caches
1862 self.updatecaches()
1863 self.updatecaches()
1863
1864
1864 # Ensure the persistent tag cache is updated. Doing it now
1865 # Ensure the persistent tag cache is updated. Doing it now
1865 # means that the tag cache only has to worry about destroyed
1866 # means that the tag cache only has to worry about destroyed
1866 # heads immediately after a strip/rollback. That in turn
1867 # heads immediately after a strip/rollback. That in turn
1867 # guarantees that "cachetip == currenttip" (comparing both rev
1868 # guarantees that "cachetip == currenttip" (comparing both rev
1868 # and node) always means no nodes have been added or destroyed.
1869 # and node) always means no nodes have been added or destroyed.
1869
1870
1870 # XXX this is suboptimal when qrefresh'ing: we strip the current
1871 # XXX this is suboptimal when qrefresh'ing: we strip the current
1871 # head, refresh the tag cache, then immediately add a new head.
1872 # head, refresh the tag cache, then immediately add a new head.
1872 # But I think doing it this way is necessary for the "instant
1873 # But I think doing it this way is necessary for the "instant
1873 # tag cache retrieval" case to work.
1874 # tag cache retrieval" case to work.
1874 self.invalidate()
1875 self.invalidate()
1875
1876
1876 def walk(self, match, node=None):
1877 def walk(self, match, node=None):
1877 '''
1878 '''
1878 walk recursively through the directory tree or a given
1879 walk recursively through the directory tree or a given
1879 changeset, finding all files matched by the match
1880 changeset, finding all files matched by the match
1880 function
1881 function
1881 '''
1882 '''
1882 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
1883 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
1883 return self[node].walk(match)
1884 return self[node].walk(match)
1884
1885
1885 def status(self, node1='.', node2=None, match=None,
1886 def status(self, node1='.', node2=None, match=None,
1886 ignored=False, clean=False, unknown=False,
1887 ignored=False, clean=False, unknown=False,
1887 listsubrepos=False):
1888 listsubrepos=False):
1888 '''a convenience method that calls node1.status(node2)'''
1889 '''a convenience method that calls node1.status(node2)'''
1889 return self[node1].status(node2, match, ignored, clean, unknown,
1890 return self[node1].status(node2, match, ignored, clean, unknown,
1890 listsubrepos)
1891 listsubrepos)
1891
1892
1892 def addpostdsstatus(self, ps):
1893 def addpostdsstatus(self, ps):
1893 """Add a callback to run within the wlock, at the point at which status
1894 """Add a callback to run within the wlock, at the point at which status
1894 fixups happen.
1895 fixups happen.
1895
1896
1896 On status completion, callback(wctx, status) will be called with the
1897 On status completion, callback(wctx, status) will be called with the
1897 wlock held, unless the dirstate has changed from underneath or the wlock
1898 wlock held, unless the dirstate has changed from underneath or the wlock
1898 couldn't be grabbed.
1899 couldn't be grabbed.
1899
1900
1900 Callbacks should not capture and use a cached copy of the dirstate --
1901 Callbacks should not capture and use a cached copy of the dirstate --
1901 it might change in the meantime. Instead, they should access the
1902 it might change in the meantime. Instead, they should access the
1902 dirstate via wctx.repo().dirstate.
1903 dirstate via wctx.repo().dirstate.
1903
1904
1904 This list is emptied out after each status run -- extensions should
1905 This list is emptied out after each status run -- extensions should
1905 make sure they add to this list each time dirstate.status is called.
1906 make sure they add to this list each time dirstate.status is called.
1906 Extensions should also make sure they don't call this for statuses
1907 Extensions should also make sure they don't call this for statuses
1907 that don't involve the dirstate.
1908 that don't involve the dirstate.
1908 """
1909 """
1909
1910
1910 # The list is located here for uniqueness reasons -- it is actually
1911 # The list is located here for uniqueness reasons -- it is actually
1911 # managed by the workingctx, but that isn't unique per-repo.
1912 # managed by the workingctx, but that isn't unique per-repo.
1912 self._postdsstatus.append(ps)
1913 self._postdsstatus.append(ps)
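# Illustrative sketch (not part of localrepo.py, shown out of line): how an
# extension might use the post-dirstate-status hook described above. Only
# addpostdsstatus() and the callback(wctx, status) signature come from this
# API; the callback name and body below are assumptions.
def _reportdirty(wctx, status):
    # runs under wlock once status fixups are complete
    wctx.repo().ui.debug('post-status: %d modified files\n'
                         % len(status.modified))
# since the callback list is cleared after every status run, an extension
# re-registers it each time it triggers one: repo.addpostdsstatus(_reportdirty)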
1913
1914
1914 def postdsstatus(self):
1915 def postdsstatus(self):
1915 """Used by workingctx to get the list of post-dirstate-status hooks."""
1916 """Used by workingctx to get the list of post-dirstate-status hooks."""
1916 return self._postdsstatus
1917 return self._postdsstatus
1917
1918
1918 def clearpostdsstatus(self):
1919 def clearpostdsstatus(self):
1919 """Used by workingctx to clear post-dirstate-status hooks."""
1920 """Used by workingctx to clear post-dirstate-status hooks."""
1920 del self._postdsstatus[:]
1921 del self._postdsstatus[:]
1921
1922
1922 def heads(self, start=None):
1923 def heads(self, start=None):
1923 if start is None:
1924 if start is None:
1924 cl = self.changelog
1925 cl = self.changelog
1925 headrevs = reversed(cl.headrevs())
1926 headrevs = reversed(cl.headrevs())
1926 return [cl.node(rev) for rev in headrevs]
1927 return [cl.node(rev) for rev in headrevs]
1927
1928
1928 heads = self.changelog.heads(start)
1929 heads = self.changelog.heads(start)
1929 # sort the output in rev descending order
1930 # sort the output in rev descending order
1930 return sorted(heads, key=self.changelog.rev, reverse=True)
1931 return sorted(heads, key=self.changelog.rev, reverse=True)
1931
1932
1932 def branchheads(self, branch=None, start=None, closed=False):
1933 def branchheads(self, branch=None, start=None, closed=False):
1933 '''return a (possibly filtered) list of heads for the given branch
1934 '''return a (possibly filtered) list of heads for the given branch
1934
1935
1935 Heads are returned in topological order, from newest to oldest.
1936 Heads are returned in topological order, from newest to oldest.
1936 If branch is None, use the dirstate branch.
1937 If branch is None, use the dirstate branch.
1937 If start is not None, return only heads reachable from start.
1938 If start is not None, return only heads reachable from start.
1938 If closed is True, return heads that are marked as closed as well.
1939 If closed is True, return heads that are marked as closed as well.
1939 '''
1940 '''
1940 if branch is None:
1941 if branch is None:
1941 branch = self[None].branch()
1942 branch = self[None].branch()
1942 branches = self.branchmap()
1943 branches = self.branchmap()
1943 if branch not in branches:
1944 if branch not in branches:
1944 return []
1945 return []
1945 # the cache returns heads ordered lowest to highest
1946 # the cache returns heads ordered lowest to highest
1946 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1947 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1947 if start is not None:
1948 if start is not None:
1948 # filter out the heads that cannot be reached from startrev
1949 # filter out the heads that cannot be reached from startrev
1949 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1950 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1950 bheads = [h for h in bheads if h in fbheads]
1951 bheads = [h for h in bheads if h in fbheads]
1951 return bheads
1952 return bheads
1952
1953
1953 def branches(self, nodes):
1954 def branches(self, nodes):
1954 if not nodes:
1955 if not nodes:
1955 nodes = [self.changelog.tip()]
1956 nodes = [self.changelog.tip()]
1956 b = []
1957 b = []
1957 for n in nodes:
1958 for n in nodes:
1958 t = n
1959 t = n
1959 while True:
1960 while True:
1960 p = self.changelog.parents(n)
1961 p = self.changelog.parents(n)
1961 if p[1] != nullid or p[0] == nullid:
1962 if p[1] != nullid or p[0] == nullid:
1962 b.append((t, n, p[0], p[1]))
1963 b.append((t, n, p[0], p[1]))
1963 break
1964 break
1964 n = p[0]
1965 n = p[0]
1965 return b
1966 return b
1966
1967
1967 def between(self, pairs):
1968 def between(self, pairs):
1968 r = []
1969 r = []
1969
1970
1970 for top, bottom in pairs:
1971 for top, bottom in pairs:
1971 n, l, i = top, [], 0
1972 n, l, i = top, [], 0
1972 f = 1
1973 f = 1
1973
1974
1974 while n != bottom and n != nullid:
1975 while n != bottom and n != nullid:
1975 p = self.changelog.parents(n)[0]
1976 p = self.changelog.parents(n)[0]
1976 if i == f:
1977 if i == f:
1977 l.append(n)
1978 l.append(n)
1978 f = f * 2
1979 f = f * 2
1979 n = p
1980 n = p
1980 i += 1
1981 i += 1
1981
1982
1982 r.append(l)
1983 r.append(l)
1983
1984
1984 return r
1985 return r
1985
1986
1986 def checkpush(self, pushop):
1987 def checkpush(self, pushop):
1987 """Extensions can override this function if additional checks have
1988 """Extensions can override this function if additional checks have
1988 to be performed before pushing, or call it if they override push
1989 to be performed before pushing, or call it if they override push
1989 command.
1990 command.
1990 """
1991 """
1991 pass
1992 pass
1992
1993
1993 @unfilteredpropertycache
1994 @unfilteredpropertycache
1994 def prepushoutgoinghooks(self):
1995 def prepushoutgoinghooks(self):
1995 """Return util.hooks consists of a pushop with repo, remote, outgoing
1996 """Return util.hooks consists of a pushop with repo, remote, outgoing
1996 methods, which are called before pushing changesets.
1997 methods, which are called before pushing changesets.
1997 """
1998 """
1998 return util.hooks()
1999 return util.hooks()
1999
2000
2000 def pushkey(self, namespace, key, old, new):
2001 def pushkey(self, namespace, key, old, new):
2001 try:
2002 try:
2002 tr = self.currenttransaction()
2003 tr = self.currenttransaction()
2003 hookargs = {}
2004 hookargs = {}
2004 if tr is not None:
2005 if tr is not None:
2005 hookargs.update(tr.hookargs)
2006 hookargs.update(tr.hookargs)
2006 hookargs['namespace'] = namespace
2007 hookargs['namespace'] = namespace
2007 hookargs['key'] = key
2008 hookargs['key'] = key
2008 hookargs['old'] = old
2009 hookargs['old'] = old
2009 hookargs['new'] = new
2010 hookargs['new'] = new
2010 self.hook('prepushkey', throw=True, **hookargs)
2011 self.hook('prepushkey', throw=True, **hookargs)
2011 except error.HookAbort as exc:
2012 except error.HookAbort as exc:
2012 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2013 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2013 if exc.hint:
2014 if exc.hint:
2014 self.ui.write_err(_("(%s)\n") % exc.hint)
2015 self.ui.write_err(_("(%s)\n") % exc.hint)
2015 return False
2016 return False
2016 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2017 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2017 ret = pushkey.push(self, namespace, key, old, new)
2018 ret = pushkey.push(self, namespace, key, old, new)
2018 def runhook():
2019 def runhook():
2019 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2020 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2020 ret=ret)
2021 ret=ret)
2021 self._afterlock(runhook)
2022 self._afterlock(runhook)
2022 return ret
2023 return ret
2023
2024
2024 def listkeys(self, namespace):
2025 def listkeys(self, namespace):
2025 self.hook('prelistkeys', throw=True, namespace=namespace)
2026 self.hook('prelistkeys', throw=True, namespace=namespace)
2026 self.ui.debug('listing keys for "%s"\n' % namespace)
2027 self.ui.debug('listing keys for "%s"\n' % namespace)
2027 values = pushkey.list(self, namespace)
2028 values = pushkey.list(self, namespace)
2028 self.hook('listkeys', namespace=namespace, values=values)
2029 self.hook('listkeys', namespace=namespace, values=values)
2029 return values
2030 return values
2030
2031
2031 def debugwireargs(self, one, two, three=None, four=None, five=None):
2032 def debugwireargs(self, one, two, three=None, four=None, five=None):
2032 '''used to test argument passing over the wire'''
2033 '''used to test argument passing over the wire'''
2033 return "%s %s %s %s %s" % (one, two, three, four, five)
2034 return "%s %s %s %s %s" % (one, two, three, four, five)
2034
2035
2035 def savecommitmessage(self, text):
2036 def savecommitmessage(self, text):
2036 fp = self.vfs('last-message.txt', 'wb')
2037 fp = self.vfs('last-message.txt', 'wb')
2037 try:
2038 try:
2038 fp.write(text)
2039 fp.write(text)
2039 finally:
2040 finally:
2040 fp.close()
2041 fp.close()
2041 return self.pathto(fp.name[len(self.root) + 1:])
2042 return self.pathto(fp.name[len(self.root) + 1:])
2042
2043
2043 # used to avoid circular references so destructors work
2044 # used to avoid circular references so destructors work
2044 def aftertrans(files):
2045 def aftertrans(files):
2045 renamefiles = [tuple(t) for t in files]
2046 renamefiles = [tuple(t) for t in files]
2046 def a():
2047 def a():
2047 for vfs, src, dest in renamefiles:
2048 for vfs, src, dest in renamefiles:
2048 # if src and dest refer to the same file, vfs.rename is a no-op,
2049 # if src and dest refer to the same file, vfs.rename is a no-op,
2049 # leaving both src and dest on disk. delete dest to make sure
2050 # leaving both src and dest on disk. delete dest to make sure
2050 # the rename couldn't be such a no-op.
2051 # the rename couldn't be such a no-op.
2051 vfs.tryunlink(dest)
2052 vfs.tryunlink(dest)
2052 try:
2053 try:
2053 vfs.rename(src, dest)
2054 vfs.rename(src, dest)
2054 except OSError: # journal file does not yet exist
2055 except OSError: # journal file does not yet exist
2055 pass
2056 pass
2056 return a
2057 return a
2057
2058
2058 def undoname(fn):
2059 def undoname(fn):
2059 base, name = os.path.split(fn)
2060 base, name = os.path.split(fn)
2060 assert name.startswith('journal')
2061 assert name.startswith('journal')
2061 return os.path.join(base, name.replace('journal', 'undo', 1))
2062 return os.path.join(base, name.replace('journal', 'undo', 1))
2062
2063
2063 def instance(ui, path, create):
2064 def instance(ui, path, create):
2064 return localrepository(ui, util.urllocalpath(path), create)
2065 return localrepository(ui, util.urllocalpath(path), create)
2065
2066
2066 def islocal(path):
2067 def islocal(path):
2067 return True
2068 return True
2068
2069
2069 def newreporequirements(repo):
2070 def newreporequirements(repo):
2070 """Determine the set of requirements for a new local repository.
2071 """Determine the set of requirements for a new local repository.
2071
2072
2072 Extensions can wrap this function to specify custom requirements for
2073 Extensions can wrap this function to specify custom requirements for
2073 new repositories.
2074 new repositories.
2074 """
2075 """
2075 ui = repo.ui
2076 ui = repo.ui
2076 requirements = {'revlogv1'}
2077 requirements = {'revlogv1'}
2077 if ui.configbool('format', 'usestore'):
2078 if ui.configbool('format', 'usestore'):
2078 requirements.add('store')
2079 requirements.add('store')
2079 if ui.configbool('format', 'usefncache'):
2080 if ui.configbool('format', 'usefncache'):
2080 requirements.add('fncache')
2081 requirements.add('fncache')
2081 if ui.configbool('format', 'dotencode'):
2082 if ui.configbool('format', 'dotencode'):
2082 requirements.add('dotencode')
2083 requirements.add('dotencode')
2083
2084
2084 compengine = ui.config('experimental', 'format.compression', 'zlib')
2085 compengine = ui.config('experimental', 'format.compression', 'zlib')
2085 if compengine not in util.compengines:
2086 if compengine not in util.compengines:
2086 raise error.Abort(_('compression engine %s defined by '
2087 raise error.Abort(_('compression engine %s defined by '
2087 'experimental.format.compression not available') %
2088 'experimental.format.compression not available') %
2088 compengine,
2089 compengine,
2089 hint=_('run "hg debuginstall" to list available '
2090 hint=_('run "hg debuginstall" to list available '
2090 'compression engines'))
2091 'compression engines'))
2091
2092
2092 # zlib is the historical default and doesn't need an explicit requirement.
2093 # zlib is the historical default and doesn't need an explicit requirement.
2093 if compengine != 'zlib':
2094 if compengine != 'zlib':
2094 requirements.add('exp-compression-%s' % compengine)
2095 requirements.add('exp-compression-%s' % compengine)
2095
2096
2096 if scmutil.gdinitconfig(ui):
2097 if scmutil.gdinitconfig(ui):
2097 requirements.add('generaldelta')
2098 requirements.add('generaldelta')
2098 if ui.configbool('experimental', 'treemanifest', False):
2099 if ui.configbool('experimental', 'treemanifest', False):
2099 requirements.add('treemanifest')
2100 requirements.add('treemanifest')
2100 if ui.configbool('experimental', 'manifestv2', False):
2101 if ui.configbool('experimental', 'manifestv2', False):
2101 requirements.add('manifestv2')
2102 requirements.add('manifestv2')
2102
2103
2103 revlogv2 = ui.config('experimental', 'revlogv2')
2104 revlogv2 = ui.config('experimental', 'revlogv2')
2104 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2105 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2105 requirements.remove('revlogv1')
2106 requirements.remove('revlogv1')
2106 # generaldelta is implied by revlogv2.
2107 # generaldelta is implied by revlogv2.
2107 requirements.discard('generaldelta')
2108 requirements.discard('generaldelta')
2108 requirements.add(REVLOGV2_REQUIREMENT)
2109 requirements.add(REVLOGV2_REQUIREMENT)
2109
2110
2110 return requirements
2111 return requirements
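The docstring above invites extensions to wrap newreporequirements(); the sketch below is a minimal, hedged example of such an extension living in its own module. extensions.wrapfunction() and the extsetup() entry point are standard Mercurial extension APIs, while the 'myext' config knob and the 'exp-myfeature' requirement name are made up for illustration.

from mercurial import extensions, localrepo

def _newreporequirements(orig, repo):
    # extend the default requirement set computed by the wrapped function
    requirements = orig(repo)
    if repo.ui.configbool('myext', 'enable-myfeature'):
        requirements.add('exp-myfeature')
    return requirements

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)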
@@ -1,1031 +1,1034
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides the old and new changeset identifiers, such as creation date or
20 besides the old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "precursor" and possible
23 The old obsoleted changeset is called a "precursor" and possible
24 replacements are called "successors". Markers that use changeset X as
24 replacements are called "successors". Markers that use changeset X as
25 a precursor are called "successor markers of X" because they hold
25 a precursor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successor are called "precursor markers of Y" because they hold
27 a successor are called "precursor markers of Y" because they hold
28 information about the precursors of Y.
28 information about the precursors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A into A' and
50 case. If two independent operations rewrite the same changeset A into A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. The marker format depends on the version. See
66 The header is followed by the markers. The marker format depends on the version. See
67 the comment associated with each format for details.
67 the comment associated with each format for details.
68
68
69 """
69 """
70 from __future__ import absolute_import
70 from __future__ import absolute_import
71
71
72 import errno
72 import errno
73 import struct
73 import struct
74
74
75 from .i18n import _
75 from .i18n import _
76 from . import (
76 from . import (
77 error,
77 error,
78 node,
78 node,
79 obsutil,
79 obsutil,
80 phases,
80 phases,
81 policy,
81 policy,
82 util,
82 util,
83 )
83 )
84
84
85 parsers = policy.importmod(r'parsers')
85 parsers = policy.importmod(r'parsers')
86
86
87 _pack = struct.pack
87 _pack = struct.pack
88 _unpack = struct.unpack
88 _unpack = struct.unpack
89 _calcsize = struct.calcsize
89 _calcsize = struct.calcsize
90 propertycache = util.propertycache
90 propertycache = util.propertycache
91
91
92 # the obsolete feature is not mature enough to be enabled by default.
92 # the obsolete feature is not mature enough to be enabled by default.
93 # you have to rely on a third party extension to enable this.
93 # you have to rely on a third party extension to enable this.
94 _enabled = False
94 _enabled = False
95
95
96 # Options for obsolescence
96 # Options for obsolescence
97 createmarkersopt = 'createmarkers'
97 createmarkersopt = 'createmarkers'
98 allowunstableopt = 'allowunstable'
98 allowunstableopt = 'allowunstable'
99 exchangeopt = 'exchange'
99 exchangeopt = 'exchange'
100
100
101 def isenabled(repo, option):
101 def isenabled(repo, option):
102 """Returns True if the given repository has the given obsolete option
102 """Returns True if the given repository has the given obsolete option
103 enabled.
103 enabled.
104 """
104 """
105 result = set(repo.ui.configlist('experimental', 'evolution'))
105 result = set(repo.ui.configlist('experimental', 'evolution'))
106 if 'all' in result:
106 if 'all' in result:
107 return True
107 return True
108
108
109 # For migration purposes, temporarily return true if the config hasn't been
109 # For migration purposes, temporarily return true if the config hasn't been
110 # set but _enabled is true.
110 # set but _enabled is true.
111 if len(result) == 0 and _enabled:
111 if len(result) == 0 and _enabled:
112 return True
112 return True
113
113
114 # createmarkers must be enabled if other options are enabled
114 # createmarkers must be enabled if other options are enabled
115 if ((allowunstableopt in result or exchangeopt in result) and
115 if ((allowunstableopt in result or exchangeopt in result) and
116 not createmarkersopt in result):
116 not createmarkersopt in result):
117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
118 "if other obsolete options are enabled"))
118 "if other obsolete options are enabled"))
119
119
120 return option in result
120 return option in result
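A hedged usage sketch (assumed caller-side code, not taken from this module): the configuration section read by isenabled() above, and a trivial helper that gates marker creation on the createmarkers option. The helper name is made up.

# hgrc read via repo.ui.configlist('experimental', 'evolution') above:
#
#     [experimental]
#     evolution = createmarkers, exchange

def cancreatemarkers(repo):
    # hypothetical helper: only create obsolescence markers when allowed
    return isenabled(repo, createmarkersopt)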
121
121
122 ### obsolescence marker flag
122 ### obsolescence marker flag
123
123
124 ## bumpedfix flag
124 ## bumpedfix flag
125 #
125 #
126 # When a changeset A' succeeds a changeset A which became public, we call A'
126 # When a changeset A' succeeds a changeset A which became public, we call A'
127 # "bumped" because it's a successor of a public changeset
127 # "bumped" because it's a successor of a public changeset
128 #
128 #
129 # o A' (bumped)
129 # o A' (bumped)
130 # |`:
130 # |`:
131 # | o A
131 # | o A
132 # |/
132 # |/
133 # o Z
133 # o Z
134 #
134 #
135 # The way to solve this situation is to create a new changeset Ad as a child
135 # The way to solve this situation is to create a new changeset Ad as a child
136 # of A. This changeset has the same content as A'. So the diff from A to A'
136 # of A. This changeset has the same content as A'. So the diff from A to A'
137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
138 #
138 #
139 # o Ad
139 # o Ad
140 # |`:
140 # |`:
141 # | x A'
141 # | x A'
142 # |'|
142 # |'|
143 # o | A
143 # o | A
144 # |/
144 # |/
145 # o Z
145 # o Z
146 #
146 #
147 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
147 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
148 # as bumped too, we add the `bumpedfix` flag to the marker: <A', (Ad,)>.
148 # as bumped too, we add the `bumpedfix` flag to the marker: <A', (Ad,)>.
149 # This flag means that the successors express the changes between the public and
149 # This flag means that the successors express the changes between the public and
150 # bumped versions and fix the situation, breaking the transitivity of
150 # bumped versions and fix the situation, breaking the transitivity of
151 # "bumped" here.
151 # "bumped" here.
152 bumpedfix = 1
152 bumpedfix = 1
153 usingsha256 = 2
153 usingsha256 = 2
154
154
155 ## Parsing and writing of version "0"
155 ## Parsing and writing of version "0"
156 #
156 #
157 # The header is followed by the markers. Each marker is made of:
157 # The header is followed by the markers. Each marker is made of:
158 #
158 #
159 # - 1 uint8 : number of new changesets "N", can be zero.
159 # - 1 uint8 : number of new changesets "N", can be zero.
160 #
160 #
161 # - 1 uint32: metadata size "M" in bytes.
161 # - 1 uint32: metadata size "M" in bytes.
162 #
162 #
163 # - 1 byte: a bit field. It is reserved for flags used in common
163 # - 1 byte: a bit field. It is reserved for flags used in common
164 # obsolete marker operations, to avoid repeated decoding of metadata
164 # obsolete marker operations, to avoid repeated decoding of metadata
165 # entries.
165 # entries.
166 #
166 #
167 # - 20 bytes: obsoleted changeset identifier.
167 # - 20 bytes: obsoleted changeset identifier.
168 #
168 #
169 # - N*20 bytes: new changesets identifiers.
169 # - N*20 bytes: new changesets identifiers.
170 #
170 #
171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 # string contains a key and a value, separated by a colon ':', without
172 # string contains a key and a value, separated by a colon ':', without
173 # additional encoding. Keys cannot contain '\0' or ':' and values
173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 # cannot contain '\0'.
174 # cannot contain '\0'.
175 _fm0version = 0
175 _fm0version = 0
176 _fm0fixed = '>BIB20s'
176 _fm0fixed = '>BIB20s'
177 _fm0node = '20s'
177 _fm0node = '20s'
178 _fm0fsize = _calcsize(_fm0fixed)
178 _fm0fsize = _calcsize(_fm0fixed)
179 _fm0fnodesize = _calcsize(_fm0node)
179 _fm0fnodesize = _calcsize(_fm0node)
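A short, hedged worked example of the version-0 layout documented above (not part of the module): it packs the fixed part of a prune marker, i.e. no successors, no metadata and no flags; the precursor node is a placeholder and the format string repeats _fm0fixed.

import struct

precursor = b'\x11' * 20            # placeholder 20-byte node id
numsuc, mdsize, flags = 0, 0, 0     # no successors, empty metadata, no flags
fixed = struct.pack('>BIB20s', numsuc, mdsize, flags, precursor)
assert len(fixed) == struct.calcsize('>BIB20s')
# successor nodes (none here) and the metadata blob would follow 'fixed'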
180
180
181 def _fm0readmarkers(data, off):
181 def _fm0readmarkers(data, off):
182 # Loop on markers
182 # Loop on markers
183 l = len(data)
183 l = len(data)
184 while off + _fm0fsize <= l:
184 while off + _fm0fsize <= l:
185 # read fixed part
185 # read fixed part
186 cur = data[off:off + _fm0fsize]
186 cur = data[off:off + _fm0fsize]
187 off += _fm0fsize
187 off += _fm0fsize
188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
189 # read replacement
189 # read replacement
190 sucs = ()
190 sucs = ()
191 if numsuc:
191 if numsuc:
192 s = (_fm0fnodesize * numsuc)
192 s = (_fm0fnodesize * numsuc)
193 cur = data[off:off + s]
193 cur = data[off:off + s]
194 sucs = _unpack(_fm0node * numsuc, cur)
194 sucs = _unpack(_fm0node * numsuc, cur)
195 off += s
195 off += s
196 # read metadata
196 # read metadata
197 # (metadata will be decoded on demand)
197 # (metadata will be decoded on demand)
198 metadata = data[off:off + mdsize]
198 metadata = data[off:off + mdsize]
199 if len(metadata) != mdsize:
199 if len(metadata) != mdsize:
200 raise error.Abort(_('parsing obsolete marker: metadata is too '
200 raise error.Abort(_('parsing obsolete marker: metadata is too '
201 'short, %d bytes expected, got %d')
201 'short, %d bytes expected, got %d')
202 % (mdsize, len(metadata)))
202 % (mdsize, len(metadata)))
203 off += mdsize
203 off += mdsize
204 metadata = _fm0decodemeta(metadata)
204 metadata = _fm0decodemeta(metadata)
205 try:
205 try:
206 when, offset = metadata.pop('date', '0 0').split(' ')
206 when, offset = metadata.pop('date', '0 0').split(' ')
207 date = float(when), int(offset)
207 date = float(when), int(offset)
208 except ValueError:
208 except ValueError:
209 date = (0., 0)
209 date = (0., 0)
210 parents = None
210 parents = None
211 if 'p2' in metadata:
211 if 'p2' in metadata:
212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
213 elif 'p1' in metadata:
213 elif 'p1' in metadata:
214 parents = (metadata.pop('p1', None),)
214 parents = (metadata.pop('p1', None),)
215 elif 'p0' in metadata:
215 elif 'p0' in metadata:
216 parents = ()
216 parents = ()
217 if parents is not None:
217 if parents is not None:
218 try:
218 try:
219 parents = tuple(node.bin(p) for p in parents)
219 parents = tuple(node.bin(p) for p in parents)
220 # if parent content is not a nodeid, drop the data
220 # if parent content is not a nodeid, drop the data
221 for p in parents:
221 for p in parents:
222 if len(p) != 20:
222 if len(p) != 20:
223 parents = None
223 parents = None
224 break
224 break
225 except TypeError:
225 except TypeError:
226 # if content cannot be translated to nodeid drop the data.
226 # if content cannot be translated to nodeid drop the data.
227 parents = None
227 parents = None
228
228
229 metadata = tuple(sorted(metadata.iteritems()))
229 metadata = tuple(sorted(metadata.iteritems()))
230
230
231 yield (pre, sucs, flags, metadata, date, parents)
231 yield (pre, sucs, flags, metadata, date, parents)
232
232
233 def _fm0encodeonemarker(marker):
233 def _fm0encodeonemarker(marker):
234 pre, sucs, flags, metadata, date, parents = marker
234 pre, sucs, flags, metadata, date, parents = marker
235 if flags & usingsha256:
235 if flags & usingsha256:
236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
237 metadata = dict(metadata)
237 metadata = dict(metadata)
238 time, tz = date
238 time, tz = date
239 metadata['date'] = '%r %i' % (time, tz)
239 metadata['date'] = '%r %i' % (time, tz)
240 if parents is not None:
240 if parents is not None:
241 if not parents:
241 if not parents:
242 # mark that we explicitly recorded no parents
242 # mark that we explicitly recorded no parents
243 metadata['p0'] = ''
243 metadata['p0'] = ''
244 for i, p in enumerate(parents, 1):
244 for i, p in enumerate(parents, 1):
245 metadata['p%i' % i] = node.hex(p)
245 metadata['p%i' % i] = node.hex(p)
246 metadata = _fm0encodemeta(metadata)
246 metadata = _fm0encodemeta(metadata)
247 numsuc = len(sucs)
247 numsuc = len(sucs)
248 format = _fm0fixed + (_fm0node * numsuc)
248 format = _fm0fixed + (_fm0node * numsuc)
249 data = [numsuc, len(metadata), flags, pre]
249 data = [numsuc, len(metadata), flags, pre]
250 data.extend(sucs)
250 data.extend(sucs)
251 return _pack(format, *data) + metadata
251 return _pack(format, *data) + metadata
252
252
253 def _fm0encodemeta(meta):
253 def _fm0encodemeta(meta):
254 """Return encoded metadata string to string mapping.
254 """Return encoded metadata string to string mapping.
255
255
256 Assume no ':' in key and no '\0' in either key or value."""
256 Assume no ':' in key and no '\0' in either key or value."""
257 for key, value in meta.iteritems():
257 for key, value in meta.iteritems():
258 if ':' in key or '\0' in key:
258 if ':' in key or '\0' in key:
259 raise ValueError("':' and '\0' are forbidden in metadata key'")
259 raise ValueError("':' and '\0' are forbidden in metadata key'")
260 if '\0' in value:
260 if '\0' in value:
261 raise ValueError("':' is forbidden in metadata value'")
261 raise ValueError("':' is forbidden in metadata value'")
262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
263
263
264 def _fm0decodemeta(data):
264 def _fm0decodemeta(data):
265 """Return string to string dictionary from encoded version."""
265 """Return string to string dictionary from encoded version."""
266 d = {}
266 d = {}
267 for l in data.split('\0'):
267 for l in data.split('\0'):
268 if l:
268 if l:
269 key, value = l.split(':')
269 key, value = l.split(':')
270 d[key] = value
270 d[key] = value
271 return d
271 return d
272
272
273 ## Parsing and writing of version "1"
273 ## Parsing and writing of version "1"
274 #
274 #
275 # The header is followed by the markers. Each marker is made of:
275 # The header is followed by the markers. Each marker is made of:
276 #
276 #
277 # - uint32: total size of the marker (including this field)
277 # - uint32: total size of the marker (including this field)
278 #
278 #
279 # - float64: date in seconds since epoch
279 # - float64: date in seconds since epoch
280 #
280 #
281 # - int16: timezone offset in minutes
281 # - int16: timezone offset in minutes
282 #
282 #
283 # - uint16: a bit field. It is reserved for flags used in common
283 # - uint16: a bit field. It is reserved for flags used in common
284 # obsolete marker operations, to avoid repeated decoding of metadata
284 # obsolete marker operations, to avoid repeated decoding of metadata
285 # entries.
285 # entries.
286 #
286 #
287 # - uint8: number of successors "N", can be zero.
287 # - uint8: number of successors "N", can be zero.
288 #
288 #
289 # - uint8: number of parents "P", can be zero.
289 # - uint8: number of parents "P", can be zero.
290 #
290 #
291 # 0: parents data stored but no parent,
291 # 0: parents data stored but no parent,
292 # 1: one parent stored,
292 # 1: one parent stored,
293 # 2: two parents stored,
293 # 2: two parents stored,
294 # 3: no parent data stored
294 # 3: no parent data stored
295 #
295 #
296 # - uint8: number of metadata entries M
296 # - uint8: number of metadata entries M
297 #
297 #
298 # - 20 or 32 bytes: precursor changeset identifier.
298 # - 20 or 32 bytes: precursor changeset identifier.
299 #
299 #
300 # - N*(20 or 32) bytes: successors changesets identifiers.
300 # - N*(20 or 32) bytes: successors changesets identifiers.
301 #
301 #
302 # - P*(20 or 32) bytes: parents of the precursors changesets.
302 # - P*(20 or 32) bytes: parents of the precursors changesets.
303 #
303 #
304 # - M*(uint8, uint8): size of all metadata entries (key and value)
304 # - M*(uint8, uint8): size of all metadata entries (key and value)
305 #
305 #
306 # - remaining bytes: the metadata, each (key, value) pair after the other.
306 # - remaining bytes: the metadata, each (key, value) pair after the other.
307 _fm1version = 1
307 _fm1version = 1
308 _fm1fixed = '>IdhHBBB20s'
308 _fm1fixed = '>IdhHBBB20s'
309 _fm1nodesha1 = '20s'
309 _fm1nodesha1 = '20s'
310 _fm1nodesha256 = '32s'
310 _fm1nodesha256 = '32s'
311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
313 _fm1fsize = _calcsize(_fm1fixed)
313 _fm1fsize = _calcsize(_fm1fixed)
314 _fm1parentnone = 3
314 _fm1parentnone = 3
315 _fm1parentshift = 14
315 _fm1parentshift = 14
316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
317 _fm1metapair = 'BB'
317 _fm1metapair = 'BB'
318 _fm1metapairsize = _calcsize('BB')
318 _fm1metapairsize = _calcsize('BB')
319
319
320 def _fm1purereadmarkers(data, off):
320 def _fm1purereadmarkers(data, off):
321 # make some global constants local for performance
321 # make some global constants local for performance
322 noneflag = _fm1parentnone
322 noneflag = _fm1parentnone
323 sha2flag = usingsha256
323 sha2flag = usingsha256
324 sha1size = _fm1nodesha1size
324 sha1size = _fm1nodesha1size
325 sha2size = _fm1nodesha256size
325 sha2size = _fm1nodesha256size
326 sha1fmt = _fm1nodesha1
326 sha1fmt = _fm1nodesha1
327 sha2fmt = _fm1nodesha256
327 sha2fmt = _fm1nodesha256
328 metasize = _fm1metapairsize
328 metasize = _fm1metapairsize
329 metafmt = _fm1metapair
329 metafmt = _fm1metapair
330 fsize = _fm1fsize
330 fsize = _fm1fsize
331 unpack = _unpack
331 unpack = _unpack
332
332
333 # Loop on markers
333 # Loop on markers
334 stop = len(data) - _fm1fsize
334 stop = len(data) - _fm1fsize
335 ufixed = struct.Struct(_fm1fixed).unpack
335 ufixed = struct.Struct(_fm1fixed).unpack
336
336
337 while off <= stop:
337 while off <= stop:
338 # read fixed part
338 # read fixed part
339 o1 = off + fsize
339 o1 = off + fsize
340 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
340 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
341
341
342 if flags & sha2flag:
342 if flags & sha2flag:
343 # FIXME: prec was read as a SHA1, needs to be amended
343 # FIXME: prec was read as a SHA1, needs to be amended
344
344
345 # read 0 or more successors
345 # read 0 or more successors
346 if numsuc == 1:
346 if numsuc == 1:
347 o2 = o1 + sha2size
347 o2 = o1 + sha2size
348 sucs = (data[o1:o2],)
348 sucs = (data[o1:o2],)
349 else:
349 else:
350 o2 = o1 + sha2size * numsuc
350 o2 = o1 + sha2size * numsuc
351 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
351 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
352
352
353 # read parents
353 # read parents
354 if numpar == noneflag:
354 if numpar == noneflag:
355 o3 = o2
355 o3 = o2
356 parents = None
356 parents = None
357 elif numpar == 1:
357 elif numpar == 1:
358 o3 = o2 + sha2size
358 o3 = o2 + sha2size
359 parents = (data[o2:o3],)
359 parents = (data[o2:o3],)
360 else:
360 else:
361 o3 = o2 + sha2size * numpar
361 o3 = o2 + sha2size * numpar
362 parents = unpack(sha2fmt * numpar, data[o2:o3])
362 parents = unpack(sha2fmt * numpar, data[o2:o3])
363 else:
363 else:
364 # read 0 or more successors
364 # read 0 or more successors
365 if numsuc == 1:
365 if numsuc == 1:
366 o2 = o1 + sha1size
366 o2 = o1 + sha1size
367 sucs = (data[o1:o2],)
367 sucs = (data[o1:o2],)
368 else:
368 else:
369 o2 = o1 + sha1size * numsuc
369 o2 = o1 + sha1size * numsuc
370 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
370 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
371
371
372 # read parents
372 # read parents
373 if numpar == noneflag:
373 if numpar == noneflag:
374 o3 = o2
374 o3 = o2
375 parents = None
375 parents = None
376 elif numpar == 1:
376 elif numpar == 1:
377 o3 = o2 + sha1size
377 o3 = o2 + sha1size
378 parents = (data[o2:o3],)
378 parents = (data[o2:o3],)
379 else:
379 else:
380 o3 = o2 + sha1size * numpar
380 o3 = o2 + sha1size * numpar
381 parents = unpack(sha1fmt * numpar, data[o2:o3])
381 parents = unpack(sha1fmt * numpar, data[o2:o3])
382
382
383 # read metadata
383 # read metadata
384 off = o3 + metasize * nummeta
384 off = o3 + metasize * nummeta
385 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
385 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
386 metadata = []
386 metadata = []
387 for idx in xrange(0, len(metapairsize), 2):
387 for idx in xrange(0, len(metapairsize), 2):
388 o1 = off + metapairsize[idx]
388 o1 = off + metapairsize[idx]
389 o2 = o1 + metapairsize[idx + 1]
389 o2 = o1 + metapairsize[idx + 1]
390 metadata.append((data[off:o1], data[o1:o2]))
390 metadata.append((data[off:o1], data[o1:o2]))
391 off = o2
391 off = o2
392
392
393 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
393 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
394
394
395 def _fm1encodeonemarker(marker):
395 def _fm1encodeonemarker(marker):
396 pre, sucs, flags, metadata, date, parents = marker
396 pre, sucs, flags, metadata, date, parents = marker
397 # determine node size
397 # determine node size
398 _fm1node = _fm1nodesha1
398 _fm1node = _fm1nodesha1
399 if flags & usingsha256:
399 if flags & usingsha256:
400 _fm1node = _fm1nodesha256
400 _fm1node = _fm1nodesha256
401 numsuc = len(sucs)
401 numsuc = len(sucs)
402 numextranodes = numsuc
402 numextranodes = numsuc
403 if parents is None:
403 if parents is None:
404 numpar = _fm1parentnone
404 numpar = _fm1parentnone
405 else:
405 else:
406 numpar = len(parents)
406 numpar = len(parents)
407 numextranodes += numpar
407 numextranodes += numpar
408 formatnodes = _fm1node * numextranodes
408 formatnodes = _fm1node * numextranodes
409 formatmeta = _fm1metapair * len(metadata)
409 formatmeta = _fm1metapair * len(metadata)
410 format = _fm1fixed + formatnodes + formatmeta
410 format = _fm1fixed + formatnodes + formatmeta
411 # tz is stored in minutes so we divide by 60
411 # tz is stored in minutes so we divide by 60
412 tz = date[1]//60
412 tz = date[1]//60
413 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
413 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
414 data.extend(sucs)
414 data.extend(sucs)
415 if parents is not None:
415 if parents is not None:
416 data.extend(parents)
416 data.extend(parents)
417 totalsize = _calcsize(format)
417 totalsize = _calcsize(format)
418 for key, value in metadata:
418 for key, value in metadata:
419 lk = len(key)
419 lk = len(key)
420 lv = len(value)
420 lv = len(value)
421 data.append(lk)
421 data.append(lk)
422 data.append(lv)
422 data.append(lv)
423 totalsize += lk + lv
423 totalsize += lk + lv
424 data[0] = totalsize
424 data[0] = totalsize
425 data = [_pack(format, *data)]
425 data = [_pack(format, *data)]
426 for key, value in metadata:
426 for key, value in metadata:
427 data.append(key)
427 data.append(key)
428 data.append(value)
428 data.append(value)
429 return ''.join(data)
429 return ''.join(data)
430
430
431 def _fm1readmarkers(data, off):
431 def _fm1readmarkers(data, off):
432 native = getattr(parsers, 'fm1readmarkers', None)
432 native = getattr(parsers, 'fm1readmarkers', None)
433 if not native:
433 if not native:
434 return _fm1purereadmarkers(data, off)
434 return _fm1purereadmarkers(data, off)
435 stop = len(data) - _fm1fsize
435 stop = len(data) - _fm1fsize
436 return native(data, off, stop)
436 return native(data, off, stop)
437
437
438 # mapping to read/write various marker formats
438 # mapping to read/write various marker formats
439 # <version> -> (decoder, encoder)
439 # <version> -> (decoder, encoder)
440 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
440 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
441 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
441 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
442
442
443 def _readmarkerversion(data):
443 def _readmarkerversion(data):
444 return _unpack('>B', data[0:1])[0]
444 return _unpack('>B', data[0:1])[0]
445
445
446 @util.nogc
446 @util.nogc
447 def _readmarkers(data):
447 def _readmarkers(data):
448 """Read and enumerate markers from raw data"""
448 """Read and enumerate markers from raw data"""
449 diskversion = _readmarkerversion(data)
449 diskversion = _readmarkerversion(data)
450 off = 1
450 off = 1
451 if diskversion not in formats:
451 if diskversion not in formats:
452 msg = _('parsing obsolete marker: unknown version %r') % diskversion
452 msg = _('parsing obsolete marker: unknown version %r') % diskversion
453 raise error.UnknownVersion(msg, version=diskversion)
453 raise error.UnknownVersion(msg, version=diskversion)
454 return diskversion, formats[diskversion][0](data, off)
454 return diskversion, formats[diskversion][0](data, off)
455
455
456 def encodeheader(version=_fm0version):
456 def encodeheader(version=_fm0version):
457 return _pack('>B', version)
457 return _pack('>B', version)
458
458
459 def encodemarkers(markers, addheader=False, version=_fm0version):
459 def encodemarkers(markers, addheader=False, version=_fm0version):
460 # Kept separate from flushmarkers(); it will be reused for
460 # Kept separate from flushmarkers(); it will be reused for
461 # marker exchange.
461 # marker exchange.
462 encodeone = formats[version][1]
462 encodeone = formats[version][1]
463 if addheader:
463 if addheader:
464 yield encodeheader(version)
464 yield encodeheader(version)
465 for marker in markers:
465 for marker in markers:
466 yield encodeone(marker)
466 yield encodeone(marker)
467
467
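# Illustrative round-trip, assuming `markers` is an iterable of valid marker
# tuples:
#   data = ''.join(encodemarkers(markers, addheader=True, version=_fm1version))
#   version, decoded = _readmarkers(data)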
468 @util.nogc
468 @util.nogc
469 def _addsuccessors(successors, markers):
469 def _addsuccessors(successors, markers):
470 for mark in markers:
470 for mark in markers:
471 successors.setdefault(mark[0], set()).add(mark)
471 successors.setdefault(mark[0], set()).add(mark)
472
472
473 @util.nogc
473 @util.nogc
474 def _addprecursors(precursors, markers):
474 def _addprecursors(precursors, markers):
475 for mark in markers:
475 for mark in markers:
476 for suc in mark[1]:
476 for suc in mark[1]:
477 precursors.setdefault(suc, set()).add(mark)
477 precursors.setdefault(suc, set()).add(mark)
478
478
479 @util.nogc
479 @util.nogc
480 def _addchildren(children, markers):
480 def _addchildren(children, markers):
481 for mark in markers:
481 for mark in markers:
482 parents = mark[5]
482 parents = mark[5]
483 if parents is not None:
483 if parents is not None:
484 for p in parents:
484 for p in parents:
485 children.setdefault(p, set()).add(mark)
485 children.setdefault(p, set()).add(mark)
486
486
487 def _checkinvalidmarkers(markers):
487 def _checkinvalidmarkers(markers):
488 """search for marker with invalid data and raise error if needed
488 """search for marker with invalid data and raise error if needed
489
489
490 Exist as a separated function to allow the evolve extension for a more
490 Exist as a separated function to allow the evolve extension for a more
491 subtle handling.
491 subtle handling.
492 """
492 """
493 for mark in markers:
493 for mark in markers:
494 if node.nullid in mark[1]:
494 if node.nullid in mark[1]:
495 raise error.Abort(_('bad obsolescence marker detected: '
495 raise error.Abort(_('bad obsolescence marker detected: '
496 'invalid successors nullid'))
496 'invalid successors nullid'))
497
497
498 class obsstore(object):
498 class obsstore(object):
499 """Store obsolete markers
499 """Store obsolete markers
500
500
501 Markers can be accessed with three mappings:
501 Markers can be accessed with three mappings:
502 - precursors[x] -> set(markers on precursors edges of x)
502 - precursors[x] -> set(markers on precursors edges of x)
503 - successors[x] -> set(markers on successors edges of x)
503 - successors[x] -> set(markers on successors edges of x)
504 - children[x] -> set(markers on precursors edges of children(x))
504 - children[x] -> set(markers on precursors edges of children(x))
505 """
505 """
506
506
507 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
507 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
508 # prec: nodeid, precursor changesets
508 # prec: nodeid, precursor changesets
509 # succs: tuple of nodeid, successor changesets (0-N length)
509 # succs: tuple of nodeid, successor changesets (0-N length)
510 # flag: integer, flag field carrying modifier for the markers (see doc)
510 # flag: integer, flag field carrying modifier for the markers (see doc)
511 # meta: binary blob, encoded metadata dictionary
511 # meta: binary blob, encoded metadata dictionary
512 # date: (float, int) tuple, date of marker creation
512 # date: (float, int) tuple, date of marker creation
513 # parents: (tuple of nodeid) or None, parents of precursors
513 # parents: (tuple of nodeid) or None, parents of precursors
514 # None is used when no data has been recorded
514 # None is used when no data has been recorded
515
515
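# For example, obsstore.create() below builds in-memory markers shaped roughly
# like (values illustrative, nodes being 20-byte binary nodeids):
#   (prec, (succ1, succ2), 0, (('user', 'alice'),), (1500000000.0, 0), None)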
516 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
516 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
517 # caches for various obsolescence related sets
517 # caches for various obsolescence related sets
518 self.caches = {}
518 self.caches = {}
519 self.svfs = svfs
519 self.svfs = svfs
520 self._defaultformat = defaultformat
520 self._defaultformat = defaultformat
521 self._readonly = readonly
521 self._readonly = readonly
522
522
523 def __iter__(self):
523 def __iter__(self):
524 return iter(self._all)
524 return iter(self._all)
525
525
526 def __len__(self):
526 def __len__(self):
527 return len(self._all)
527 return len(self._all)
528
528
529 def __nonzero__(self):
529 def __nonzero__(self):
530 if not self._cached('_all'):
530 if not self._cached('_all'):
531 try:
531 try:
532 return self.svfs.stat('obsstore').st_size > 1
532 return self.svfs.stat('obsstore').st_size > 1
533 except OSError as inst:
533 except OSError as inst:
534 if inst.errno != errno.ENOENT:
534 if inst.errno != errno.ENOENT:
535 raise
535 raise
536 # just build an empty _all list if no obsstore exists, which
536 # just build an empty _all list if no obsstore exists, which
537 # avoids further stat() syscalls
537 # avoids further stat() syscalls
538 pass
538 pass
539 return bool(self._all)
539 return bool(self._all)
540
540
541 __bool__ = __nonzero__
541 __bool__ = __nonzero__
542
542
543 @property
543 @property
544 def readonly(self):
544 def readonly(self):
545 """True if marker creation is disabled
545 """True if marker creation is disabled
546
546
547 Remove me in the future when obsolete markers are always on."""
547 Remove me in the future when obsolete markers are always on."""
548 return self._readonly
548 return self._readonly
549
549
550 def create(self, transaction, prec, succs=(), flag=0, parents=None,
550 def create(self, transaction, prec, succs=(), flag=0, parents=None,
551 date=None, metadata=None, ui=None):
551 date=None, metadata=None, ui=None):
552 """obsolete: add a new obsolete marker
552 """obsolete: add a new obsolete marker
553
553
554 * ensure it is hashable
554 * ensure it is hashable
555 * check mandatory metadata
555 * check mandatory metadata
556 * encode metadata
556 * encode metadata
557
557
558 If you are a human writing code that creates markers, you want to use the
558 If you are a human writing code that creates markers, you want to use the
559 `createmarkers` function in this module instead.
559 `createmarkers` function in this module instead.
560
560
561 Return True if a new marker has been added, False if the marker
561 Return True if a new marker has been added, False if the marker
562 already existed (no-op).
562 already existed (no-op).
563 """
563 """
564 if metadata is None:
564 if metadata is None:
565 metadata = {}
565 metadata = {}
566 if date is None:
566 if date is None:
567 if 'date' in metadata:
567 if 'date' in metadata:
568 # as a courtesy for out-of-tree extensions
568 # as a courtesy for out-of-tree extensions
569 date = util.parsedate(metadata.pop('date'))
569 date = util.parsedate(metadata.pop('date'))
570 elif ui is not None:
570 elif ui is not None:
571 date = ui.configdate('devel', 'default-date')
571 date = ui.configdate('devel', 'default-date')
572 if date is None:
572 if date is None:
573 date = util.makedate()
573 date = util.makedate()
574 else:
574 else:
575 date = util.makedate()
575 date = util.makedate()
576 if len(prec) != 20:
576 if len(prec) != 20:
577 raise ValueError(prec)
577 raise ValueError(prec)
578 for succ in succs:
578 for succ in succs:
579 if len(succ) != 20:
579 if len(succ) != 20:
580 raise ValueError(succ)
580 raise ValueError(succ)
581 if prec in succs:
581 if prec in succs:
582 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
582 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
583
583
584 metadata = tuple(sorted(metadata.iteritems()))
584 metadata = tuple(sorted(metadata.iteritems()))
585
585
586 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
586 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
587 return bool(self.add(transaction, [marker]))
587 return bool(self.add(transaction, [marker]))
588
588
589 def add(self, transaction, markers):
589 def add(self, transaction, markers):
590 """Add new markers to the store
590 """Add new markers to the store
591
591
592 Take care of filtering out duplicates.
592 Take care of filtering out duplicates.
593 Return the number of new markers."""
593 Return the number of new markers."""
594 if self._readonly:
594 if self._readonly:
595 raise error.Abort(_('creating obsolete markers is not enabled on '
595 raise error.Abort(_('creating obsolete markers is not enabled on '
596 'this repo'))
596 'this repo'))
597 known = set()
597 known = set()
598 getsuccessors = self.successors.get
598 getsuccessors = self.successors.get
599 new = []
599 new = []
600 for m in markers:
600 for m in markers:
601 if m not in getsuccessors(m[0], ()) and m not in known:
601 if m not in getsuccessors(m[0], ()) and m not in known:
602 known.add(m)
602 known.add(m)
603 new.append(m)
603 new.append(m)
604 if new:
604 if new:
605 f = self.svfs('obsstore', 'ab')
605 f = self.svfs('obsstore', 'ab')
606 try:
606 try:
607 offset = f.tell()
607 offset = f.tell()
608 transaction.add('obsstore', offset)
608 transaction.add('obsstore', offset)
609 # offset == 0: new file - add the version header
609 # offset == 0: new file - add the version header
610 for bytes in encodemarkers(new, offset == 0, self._version):
610 for bytes in encodemarkers(new, offset == 0, self._version):
611 f.write(bytes)
611 f.write(bytes)
612 finally:
612 finally:
613 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
613 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
614 # we should call 'filecacheentry.refresh()' here
614 # we should call 'filecacheentry.refresh()' here
615 f.close()
615 f.close()
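# also record the new markers in the transaction's 'changes' mapping,
# when the transaction tracks 'obsmarkers' (it may not)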
616 addedmarkers = transaction.changes.get('obsmarkers')
617 if addedmarkers is not None:
618 addedmarkers.update(new)
616 self._addmarkers(new)
619 self._addmarkers(new)
617 # new markers *may* have changed several sets. invalidate the caches.
620 # new markers *may* have changed several sets. invalidate the caches.
618 self.caches.clear()
621 self.caches.clear()
619 # records the number of new markers for the transaction hooks
622 # records the number of new markers for the transaction hooks
620 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
623 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
621 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
624 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
622 return len(new)
625 return len(new)
623
626
624 def mergemarkers(self, transaction, data):
627 def mergemarkers(self, transaction, data):
625 """merge a binary stream of markers inside the obsstore
628 """merge a binary stream of markers inside the obsstore
626
629
627 Returns the number of new markers added."""
630 Returns the number of new markers added."""
628 version, markers = _readmarkers(data)
631 version, markers = _readmarkers(data)
629 return self.add(transaction, markers)
632 return self.add(transaction, markers)
630
633
631 @propertycache
634 @propertycache
632 def _data(self):
635 def _data(self):
633 return self.svfs.tryread('obsstore')
636 return self.svfs.tryread('obsstore')
634
637
635 @propertycache
638 @propertycache
636 def _version(self):
639 def _version(self):
637 if len(self._data) >= 1:
640 if len(self._data) >= 1:
638 return _readmarkerversion(self._data)
641 return _readmarkerversion(self._data)
639 else:
642 else:
640 return self._defaultformat
643 return self._defaultformat
641
644
642 @propertycache
645 @propertycache
643 def _all(self):
646 def _all(self):
644 data = self._data
647 data = self._data
645 if not data:
648 if not data:
646 return []
649 return []
647 self._version, markers = _readmarkers(data)
650 self._version, markers = _readmarkers(data)
648 markers = list(markers)
651 markers = list(markers)
649 _checkinvalidmarkers(markers)
652 _checkinvalidmarkers(markers)
650 return markers
653 return markers
651
654
652 @propertycache
655 @propertycache
653 def successors(self):
656 def successors(self):
654 successors = {}
657 successors = {}
655 _addsuccessors(successors, self._all)
658 _addsuccessors(successors, self._all)
656 return successors
659 return successors
657
660
658 @propertycache
661 @propertycache
659 def precursors(self):
662 def precursors(self):
660 precursors = {}
663 precursors = {}
661 _addprecursors(precursors, self._all)
664 _addprecursors(precursors, self._all)
662 return precursors
665 return precursors
663
666
664 @propertycache
667 @propertycache
665 def children(self):
668 def children(self):
666 children = {}
669 children = {}
667 _addchildren(children, self._all)
670 _addchildren(children, self._all)
668 return children
671 return children
669
672
670 def _cached(self, attr):
673 def _cached(self, attr):
671 return attr in self.__dict__
674 return attr in self.__dict__
672
675
673 def _addmarkers(self, markers):
676 def _addmarkers(self, markers):
674 markers = list(markers) # to allow repeated iteration
677 markers = list(markers) # to allow repeated iteration
675 self._all.extend(markers)
678 self._all.extend(markers)
676 if self._cached('successors'):
679 if self._cached('successors'):
677 _addsuccessors(self.successors, markers)
680 _addsuccessors(self.successors, markers)
678 if self._cached('precursors'):
681 if self._cached('precursors'):
679 _addprecursors(self.precursors, markers)
682 _addprecursors(self.precursors, markers)
680 if self._cached('children'):
683 if self._cached('children'):
681 _addchildren(self.children, markers)
684 _addchildren(self.children, markers)
682 _checkinvalidmarkers(markers)
685 _checkinvalidmarkers(markers)
683
686
684 def relevantmarkers(self, nodes):
687 def relevantmarkers(self, nodes):
685 """return a set of all obsolescence markers relevant to a set of nodes.
688 """return a set of all obsolescence markers relevant to a set of nodes.
686
689
687 "relevant" to a set of nodes mean:
690 "relevant" to a set of nodes mean:
688
691
689 - marker that use this changeset as successor
692 - marker that use this changeset as successor
690 - prune marker of direct children on this changeset
693 - prune marker of direct children on this changeset
691 - recursive application of the two rules on precursors of these markers
694 - recursive application of the two rules on precursors of these markers
692
695
693 It is a set so you cannot rely on order."""
696 It is a set so you cannot rely on order."""
694
697
695 pendingnodes = set(nodes)
698 pendingnodes = set(nodes)
696 seenmarkers = set()
699 seenmarkers = set()
697 seennodes = set(pendingnodes)
700 seennodes = set(pendingnodes)
698 precursorsmarkers = self.precursors
701 precursorsmarkers = self.precursors
699 succsmarkers = self.successors
702 succsmarkers = self.successors
700 children = self.children
703 children = self.children
701 while pendingnodes:
704 while pendingnodes:
702 direct = set()
705 direct = set()
703 for current in pendingnodes:
706 for current in pendingnodes:
704 direct.update(precursorsmarkers.get(current, ()))
707 direct.update(precursorsmarkers.get(current, ()))
705 pruned = [m for m in children.get(current, ()) if not m[1]]
708 pruned = [m for m in children.get(current, ()) if not m[1]]
706 direct.update(pruned)
709 direct.update(pruned)
707 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
710 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
708 direct.update(pruned)
711 direct.update(pruned)
709 direct -= seenmarkers
712 direct -= seenmarkers
710 pendingnodes = set([m[0] for m in direct])
713 pendingnodes = set([m[0] for m in direct])
711 seenmarkers |= direct
714 seenmarkers |= direct
712 pendingnodes -= seennodes
715 pendingnodes -= seennodes
713 seennodes |= pendingnodes
716 seennodes |= pendingnodes
714 return seenmarkers
717 return seenmarkers
715
718
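# Minimal usage sketch (names illustrative); most callers should go through
# createmarkers() below instead:
#   lock = repo.lock()
#   try:
#       tr = repo.transaction('add-obsolescence-marker')
#       try:
#           repo.obsstore.create(tr, precnode, succs=(succnode,), ui=repo.ui)
#           tr.close()
#       finally:
#           tr.release()
#   finally:
#       lock.release()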
716 def makestore(ui, repo):
719 def makestore(ui, repo):
717 """Create an obsstore instance from a repo."""
720 """Create an obsstore instance from a repo."""
718 # read default format for new obsstore.
721 # read default format for new obsstore.
719 # developer config: format.obsstore-version
722 # developer config: format.obsstore-version
720 defaultformat = ui.configint('format', 'obsstore-version')
723 defaultformat = ui.configint('format', 'obsstore-version')
721 # rely on obsstore class default when possible.
724 # rely on obsstore class default when possible.
722 kwargs = {}
725 kwargs = {}
723 if defaultformat is not None:
726 if defaultformat is not None:
724 kwargs['defaultformat'] = defaultformat
727 kwargs['defaultformat'] = defaultformat
725 readonly = not isenabled(repo, createmarkersopt)
728 readonly = not isenabled(repo, createmarkersopt)
726 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
729 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
727 if store and readonly:
730 if store and readonly:
728 ui.warn(_('obsolete feature not enabled but %i markers found!\n')
731 ui.warn(_('obsolete feature not enabled but %i markers found!\n')
729 % len(list(store)))
732 % len(list(store)))
730 return store
733 return store
731
734
732 def commonversion(versions):
735 def commonversion(versions):
733 """Return the newest version listed in both versions and our local formats.
736 """Return the newest version listed in both versions and our local formats.
734
737
735 Returns None if no common version exists.
738 Returns None if no common version exists.
736 """
739 """
737 versions.sort(reverse=True)
740 versions.sort(reverse=True)
738 # search for the highest version known on both sides
741 # search for the highest version known on both sides
739 for v in versions:
742 for v in versions:
740 if v in formats:
743 if v in formats:
741 return v
744 return v
742 return None
745 return None
743
746
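# For example, commonversion([_fm0version, _fm1version, 5]) picks the newer
# _fm1version, while commonversion([5, 6]) returns None, assuming versions 5
# and 6 are not in `formats`.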
744 # arbitrarily picked to fit into the 8K limit of HTTP servers
747 # arbitrarily picked to fit into the 8K limit of HTTP servers
745 # you have to take into account:
748 # you have to take into account:
746 # - the version header
749 # - the version header
747 # - the base85 encoding
750 # - the base85 encoding
748 _maxpayload = 5300
751 _maxpayload = 5300
749
752
750 def _pushkeyescape(markers):
753 def _pushkeyescape(markers):
751 """encode markers into a dict suitable for pushkey exchange
754 """encode markers into a dict suitable for pushkey exchange
752
755
753 - binary data is base85 encoded
756 - binary data is base85 encoded
754 - split in chunks smaller than 5300 bytes"""
757 - split in chunks smaller than 5300 bytes"""
755 keys = {}
758 keys = {}
756 parts = []
759 parts = []
757 currentlen = _maxpayload * 2 # ensure we create a new part
760 currentlen = _maxpayload * 2 # ensure we create a new part
758 for marker in markers:
761 for marker in markers:
759 nextdata = _fm0encodeonemarker(marker)
762 nextdata = _fm0encodeonemarker(marker)
760 if (len(nextdata) + currentlen > _maxpayload):
763 if (len(nextdata) + currentlen > _maxpayload):
761 currentpart = []
764 currentpart = []
762 currentlen = 0
765 currentlen = 0
763 parts.append(currentpart)
766 parts.append(currentpart)
764 currentpart.append(nextdata)
767 currentpart.append(nextdata)
765 currentlen += len(nextdata)
768 currentlen += len(nextdata)
766 for idx, part in enumerate(reversed(parts)):
769 for idx, part in enumerate(reversed(parts)):
767 data = ''.join([_pack('>B', _fm0version)] + part)
770 data = ''.join([_pack('>B', _fm0version)] + part)
768 keys['dump%i' % idx] = util.b85encode(data)
771 keys['dump%i' % idx] = util.b85encode(data)
769 return keys
772 return keys
770
773
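# The returned mapping is shaped like {'dump0': <base85 chunk>, 'dump1': ...},
# each chunk staying under the payload limit above (layout illustrative).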
771 def listmarkers(repo):
774 def listmarkers(repo):
772 """List markers over pushkey"""
775 """List markers over pushkey"""
773 if not repo.obsstore:
776 if not repo.obsstore:
774 return {}
777 return {}
775 return _pushkeyescape(sorted(repo.obsstore))
778 return _pushkeyescape(sorted(repo.obsstore))
776
779
777 def pushmarker(repo, key, old, new):
780 def pushmarker(repo, key, old, new):
778 """Push markers over pushkey"""
781 """Push markers over pushkey"""
779 if not key.startswith('dump'):
782 if not key.startswith('dump'):
780 repo.ui.warn(_('unknown key: %r') % key)
783 repo.ui.warn(_('unknown key: %r') % key)
781 return False
784 return False
782 if old:
785 if old:
783 repo.ui.warn(_('unexpected old value for %r') % key)
786 repo.ui.warn(_('unexpected old value for %r') % key)
784 return False
787 return False
785 data = util.b85decode(new)
788 data = util.b85decode(new)
786 lock = repo.lock()
789 lock = repo.lock()
787 try:
790 try:
788 tr = repo.transaction('pushkey: obsolete markers')
791 tr = repo.transaction('pushkey: obsolete markers')
789 try:
792 try:
790 repo.obsstore.mergemarkers(tr, data)
793 repo.obsstore.mergemarkers(tr, data)
791 repo.invalidatevolatilesets()
794 repo.invalidatevolatilesets()
792 tr.close()
795 tr.close()
793 return True
796 return True
794 finally:
797 finally:
795 tr.release()
798 tr.release()
796 finally:
799 finally:
797 lock.release()
800 lock.release()
798
801
799 # kept for backward compatibility during the 4.3 cycle
802 # kept for backward compatibility during the 4.3 cycle
800 def allprecursors(obsstore, nodes, ignoreflags=0):
803 def allprecursors(obsstore, nodes, ignoreflags=0):
801 movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
804 movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
802 util.nouideprecwarn(movemsg, '4.3')
805 util.nouideprecwarn(movemsg, '4.3')
803 return obsutil.allprecursors(obsstore, nodes, ignoreflags)
806 return obsutil.allprecursors(obsstore, nodes, ignoreflags)
804
807
805 def allsuccessors(obsstore, nodes, ignoreflags=0):
808 def allsuccessors(obsstore, nodes, ignoreflags=0):
806 movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
809 movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
807 util.nouideprecwarn(movemsg, '4.3')
810 util.nouideprecwarn(movemsg, '4.3')
808 return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
811 return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
809
812
810 def marker(repo, data):
813 def marker(repo, data):
811 movemsg = 'obsolete.marker moved to obsutil.marker'
814 movemsg = 'obsolete.marker moved to obsutil.marker'
812 repo.ui.deprecwarn(movemsg, '4.3')
815 repo.ui.deprecwarn(movemsg, '4.3')
813 return obsutil.marker(repo, data)
816 return obsutil.marker(repo, data)
814
817
815 def getmarkers(repo, nodes=None, exclusive=False):
818 def getmarkers(repo, nodes=None, exclusive=False):
816 movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
819 movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
817 repo.ui.deprecwarn(movemsg, '4.3')
820 repo.ui.deprecwarn(movemsg, '4.3')
818 return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
821 return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
819
822
820 def exclusivemarkers(repo, nodes):
823 def exclusivemarkers(repo, nodes):
821 movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
824 movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
822 repo.ui.deprecwarn(movemsg, '4.3')
825 repo.ui.deprecwarn(movemsg, '4.3')
823 return obsutil.exclusivemarkers(repo, nodes)
826 return obsutil.exclusivemarkers(repo, nodes)
824
827
825 def foreground(repo, nodes):
828 def foreground(repo, nodes):
826 movemsg = 'obsolete.foreground moved to obsutil.foreground'
829 movemsg = 'obsolete.foreground moved to obsutil.foreground'
827 repo.ui.deprecwarn(movemsg, '4.3')
830 repo.ui.deprecwarn(movemsg, '4.3')
828 return obsutil.foreground(repo, nodes)
831 return obsutil.foreground(repo, nodes)
829
832
830 def successorssets(repo, initialnode, cache=None):
833 def successorssets(repo, initialnode, cache=None):
831 movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
834 movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
832 repo.ui.deprecwarn(movemsg, '4.3')
835 repo.ui.deprecwarn(movemsg, '4.3')
833 return obsutil.successorssets(repo, initialnode, cache=cache)
836 return obsutil.successorssets(repo, initialnode, cache=cache)
834
837
835 # mapping of 'set-name' -> <function to compute this set>
838 # mapping of 'set-name' -> <function to compute this set>
836 cachefuncs = {}
839 cachefuncs = {}
837 def cachefor(name):
840 def cachefor(name):
838 """Decorator to register a function as computing the cache for a set"""
841 """Decorator to register a function as computing the cache for a set"""
839 def decorator(func):
842 def decorator(func):
840 if name in cachefuncs:
843 if name in cachefuncs:
841 msg = "duplicated registration for volatileset '%s' (existing: %r)"
844 msg = "duplicated registration for volatileset '%s' (existing: %r)"
842 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
845 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
843 cachefuncs[name] = func
846 cachefuncs[name] = func
844 return func
847 return func
845 return decorator
848 return decorator
846
849
847 def getrevs(repo, name):
850 def getrevs(repo, name):
848 """Return the set of revision that belong to the <name> set
851 """Return the set of revision that belong to the <name> set
849
852
850 Such access may compute the set and cache it for future use"""
853 Such access may compute the set and cache it for future use"""
851 repo = repo.unfiltered()
854 repo = repo.unfiltered()
852 if not repo.obsstore:
855 if not repo.obsstore:
853 return frozenset()
856 return frozenset()
854 if name not in repo.obsstore.caches:
857 if name not in repo.obsstore.caches:
855 repo.obsstore.caches[name] = cachefuncs[name](repo)
858 repo.obsstore.caches[name] = cachefuncs[name](repo)
856 return repo.obsstore.caches[name]
859 return repo.obsstore.caches[name]
857
860
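# e.g. getrevs(repo, 'obsolete') returns (and caches) the set computed by the
# @cachefor('obsolete') function further below.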
858 # To keep it simple we need to invalidate the obsolescence caches when:
861 # To keep it simple we need to invalidate the obsolescence caches when:
859 #
862 #
860 # - a new changeset is added
863 # - a new changeset is added
861 # - the public phase is changed
864 # - the public phase is changed
862 # - obsolescence markers are added
865 # - obsolescence markers are added
863 # - strip is used on a repo
866 # - strip is used on a repo
864 def clearobscaches(repo):
867 def clearobscaches(repo):
865 """Remove all obsolescence related cache from a repo
868 """Remove all obsolescence related cache from a repo
866
869
867 This remove all cache in obsstore is the obsstore already exist on the
870 This remove all cache in obsstore is the obsstore already exist on the
868 repo.
871 repo.
869
872
870 (We could be smarter here given the exact event that trigger the cache
873 (We could be smarter here given the exact event that trigger the cache
871 clearing)"""
874 clearing)"""
872 # only clear cache is there is obsstore data in this repo
875 # only clear cache is there is obsstore data in this repo
873 if 'obsstore' in repo._filecache:
876 if 'obsstore' in repo._filecache:
874 repo.obsstore.caches.clear()
877 repo.obsstore.caches.clear()
875
878
876 def _mutablerevs(repo):
879 def _mutablerevs(repo):
877 """the set of mutable revision in the repository"""
880 """the set of mutable revision in the repository"""
878 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
881 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
879
882
880 @cachefor('obsolete')
883 @cachefor('obsolete')
881 def _computeobsoleteset(repo):
884 def _computeobsoleteset(repo):
882 """the set of obsolete revisions"""
885 """the set of obsolete revisions"""
883 getnode = repo.changelog.node
886 getnode = repo.changelog.node
884 notpublic = _mutablerevs(repo)
887 notpublic = _mutablerevs(repo)
885 isobs = repo.obsstore.successors.__contains__
888 isobs = repo.obsstore.successors.__contains__
886 obs = set(r for r in notpublic if isobs(getnode(r)))
889 obs = set(r for r in notpublic if isobs(getnode(r)))
887 return obs
890 return obs
888
891
889 @cachefor('unstable')
892 @cachefor('unstable')
890 def _computeunstableset(repo):
893 def _computeunstableset(repo):
891 """the set of non obsolete revisions with obsolete parents"""
894 """the set of non obsolete revisions with obsolete parents"""
892 pfunc = repo.changelog.parentrevs
895 pfunc = repo.changelog.parentrevs
893 mutable = _mutablerevs(repo)
896 mutable = _mutablerevs(repo)
894 obsolete = getrevs(repo, 'obsolete')
897 obsolete = getrevs(repo, 'obsolete')
895 others = mutable - obsolete
898 others = mutable - obsolete
896 unstable = set()
899 unstable = set()
897 for r in sorted(others):
900 for r in sorted(others):
898 # A rev is unstable if one of its parents is obsolete or unstable;
901 # A rev is unstable if one of its parents is obsolete or unstable;
899 # this works since we traverse in increasing rev order
902 # this works since we traverse in increasing rev order
900 for p in pfunc(r):
903 for p in pfunc(r):
901 if p in obsolete or p in unstable:
904 if p in obsolete or p in unstable:
902 unstable.add(r)
905 unstable.add(r)
903 break
906 break
904 return unstable
907 return unstable
905
908
906 @cachefor('suspended')
909 @cachefor('suspended')
907 def _computesuspendedset(repo):
910 def _computesuspendedset(repo):
908 """the set of obsolete parents with non obsolete descendants"""
911 """the set of obsolete parents with non obsolete descendants"""
909 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
912 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
910 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
913 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
911
914
912 @cachefor('extinct')
915 @cachefor('extinct')
913 def _computeextinctset(repo):
916 def _computeextinctset(repo):
914 """the set of obsolete parents without non obsolete descendants"""
917 """the set of obsolete parents without non obsolete descendants"""
915 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
918 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
916
919
917
920
918 @cachefor('bumped')
921 @cachefor('bumped')
919 def _computebumpedset(repo):
922 def _computebumpedset(repo):
920 """the set of revs trying to obsolete public revisions"""
923 """the set of revs trying to obsolete public revisions"""
921 bumped = set()
924 bumped = set()
922 # util function (avoid attribute lookup in the loop)
925 # util function (avoid attribute lookup in the loop)
923 phase = repo._phasecache.phase # would be faster to grab the full list
926 phase = repo._phasecache.phase # would be faster to grab the full list
924 public = phases.public
927 public = phases.public
925 cl = repo.changelog
928 cl = repo.changelog
926 torev = cl.nodemap.get
929 torev = cl.nodemap.get
927 for ctx in repo.set('(not public()) and (not obsolete())'):
930 for ctx in repo.set('(not public()) and (not obsolete())'):
928 rev = ctx.rev()
931 rev = ctx.rev()
929 # We only evaluate mutable, non-obsolete revisions
932 # We only evaluate mutable, non-obsolete revisions
930 node = ctx.node()
933 node = ctx.node()
931 # (future) A cache of precursors may be worth it if splits are very common
934 # (future) A cache of precursors may be worth it if splits are very common
932 for pnode in obsutil.allprecursors(repo.obsstore, [node],
935 for pnode in obsutil.allprecursors(repo.obsstore, [node],
933 ignoreflags=bumpedfix):
936 ignoreflags=bumpedfix):
934 prev = torev(pnode) # unfiltered! but so is phasecache
937 prev = torev(pnode) # unfiltered! but so is phasecache
935 if (prev is not None) and (phase(repo, prev) <= public):
938 if (prev is not None) and (phase(repo, prev) <= public):
936 # we have a public precursor
939 # we have a public precursor
937 bumped.add(rev)
940 bumped.add(rev)
938 break # Next draft!
941 break # Next draft!
939 return bumped
942 return bumped
940
943
941 @cachefor('divergent')
944 @cachefor('divergent')
942 def _computedivergentset(repo):
945 def _computedivergentset(repo):
943 """the set of rev that compete to be the final successors of some revision.
946 """the set of rev that compete to be the final successors of some revision.
944 """
947 """
945 divergent = set()
948 divergent = set()
946 obsstore = repo.obsstore
949 obsstore = repo.obsstore
947 newermap = {}
950 newermap = {}
948 for ctx in repo.set('(not public()) - obsolete()'):
951 for ctx in repo.set('(not public()) - obsolete()'):
949 mark = obsstore.precursors.get(ctx.node(), ())
952 mark = obsstore.precursors.get(ctx.node(), ())
950 toprocess = set(mark)
953 toprocess = set(mark)
951 seen = set()
954 seen = set()
952 while toprocess:
955 while toprocess:
953 prec = toprocess.pop()[0]
956 prec = toprocess.pop()[0]
954 if prec in seen:
957 if prec in seen:
955 continue # emergency cycle hanging prevention
958 continue # emergency cycle hanging prevention
956 seen.add(prec)
959 seen.add(prec)
957 if prec not in newermap:
960 if prec not in newermap:
958 obsutil.successorssets(repo, prec, newermap)
961 obsutil.successorssets(repo, prec, newermap)
959 newer = [n for n in newermap[prec] if n]
962 newer = [n for n in newermap[prec] if n]
960 if len(newer) > 1:
963 if len(newer) > 1:
961 divergent.add(ctx.rev())
964 divergent.add(ctx.rev())
962 break
965 break
963 toprocess.update(obsstore.precursors.get(prec, ()))
966 toprocess.update(obsstore.precursors.get(prec, ()))
964 return divergent
967 return divergent
965
968
966
969
967 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
970 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
968 operation=None):
971 operation=None):
969 """Add obsolete markers between changesets in a repo
972 """Add obsolete markers between changesets in a repo
970
973
971 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
974 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
972 tuples. `old` and `news` are changectx. metadata is an optional dictionary
975 tuples. `old` and `news` are changectx. metadata is an optional dictionary
973 containing metadata for this marker only. It is merged with the global
976 containing metadata for this marker only. It is merged with the global
974 metadata specified through the `metadata` argument of this function.
977 metadata specified through the `metadata` argument of this function.
975
978
976 Trying to obsolete a public changeset will raise an exception.
979 Trying to obsolete a public changeset will raise an exception.
977
980
978 Current user and date are used except if specified otherwise in the
981 Current user and date are used except if specified otherwise in the
979 metadata attribute.
982 metadata attribute.
980
983
981 This function operates within a transaction of its own, but does
984 This function operates within a transaction of its own, but does
982 not take any lock on the repo.
985 not take any lock on the repo.
983 """
986 """
984 # prepare metadata
987 # prepare metadata
985 if metadata is None:
988 if metadata is None:
986 metadata = {}
989 metadata = {}
987 if 'user' not in metadata:
990 if 'user' not in metadata:
988 metadata['user'] = repo.ui.username()
991 metadata['user'] = repo.ui.username()
989 useoperation = repo.ui.configbool('experimental',
992 useoperation = repo.ui.configbool('experimental',
990 'evolution.track-operation',
993 'evolution.track-operation',
991 False)
994 False)
992 if useoperation and operation:
995 if useoperation and operation:
993 metadata['operation'] = operation
996 metadata['operation'] = operation
994 tr = repo.transaction('add-obsolescence-marker')
997 tr = repo.transaction('add-obsolescence-marker')
995 try:
998 try:
996 markerargs = []
999 markerargs = []
997 for rel in relations:
1000 for rel in relations:
998 prec = rel[0]
1001 prec = rel[0]
999 sucs = rel[1]
1002 sucs = rel[1]
1000 localmetadata = metadata.copy()
1003 localmetadata = metadata.copy()
1001 if 2 < len(rel):
1004 if 2 < len(rel):
1002 localmetadata.update(rel[2])
1005 localmetadata.update(rel[2])
1003
1006
1004 if not prec.mutable():
1007 if not prec.mutable():
1005 raise error.Abort(_("cannot obsolete public changeset: %s")
1008 raise error.Abort(_("cannot obsolete public changeset: %s")
1006 % prec,
1009 % prec,
1007 hint="see 'hg help phases' for details")
1010 hint="see 'hg help phases' for details")
1008 nprec = prec.node()
1011 nprec = prec.node()
1009 nsucs = tuple(s.node() for s in sucs)
1012 nsucs = tuple(s.node() for s in sucs)
1010 npare = None
1013 npare = None
1011 if not nsucs:
1014 if not nsucs:
1012 npare = tuple(p.node() for p in prec.parents())
1015 npare = tuple(p.node() for p in prec.parents())
1013 if nprec in nsucs:
1016 if nprec in nsucs:
1014 raise error.Abort(_("changeset %s cannot obsolete itself")
1017 raise error.Abort(_("changeset %s cannot obsolete itself")
1015 % prec)
1018 % prec)
1016
1019
1017 # Creating the marker causes the hidden cache to become invalid,
1020 # Creating the marker causes the hidden cache to become invalid,
1018 # which causes recomputation when we ask for prec.parents() above,
1021 # which causes recomputation when we ask for prec.parents() above,
1019 # resulting in n^2 behavior. So let's prepare all of the args
1022 # resulting in n^2 behavior. So let's prepare all of the args
1020 # first, then create the markers.
1023 # first, then create the markers.
1021 markerargs.append((nprec, nsucs, npare, localmetadata))
1024 markerargs.append((nprec, nsucs, npare, localmetadata))
1022
1025
1023 for args in markerargs:
1026 for args in markerargs:
1024 nprec, nsucs, npare, localmetadata = args
1027 nprec, nsucs, npare, localmetadata = args
1025 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1028 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1026 date=date, metadata=localmetadata,
1029 date=date, metadata=localmetadata,
1027 ui=repo.ui)
1030 ui=repo.ui)
1028 repo.filteredrevcache.clear()
1031 repo.filteredrevcache.clear()
1029 tr.close()
1032 tr.close()
1030 finally:
1033 finally:
1031 tr.release()
1034 tr.release()