localrepo: migrate to context manager for changing dirstate parents
Augie Fackler
r32350:9742f937 default
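
This revision replaces the paired dirstate.beginparentchange() / dirstate.endparentchange() calls in localrepo.setparents() with the dirstate.parentchange() context manager, so the parent-change bracket is closed even when the body raises. A minimal sketch of the pattern adopted here, assuming a repository object named repo (the function and argument names are illustrative; only the dirstate calls come from the diff below):

def setparents_sketch(repo, p1, p2):
    # Old style, removed by this commit: explicit begin/end calls that
    # can be skipped if an exception escapes the body.
    #   repo.dirstate.beginparentchange()
    #   repo.dirstate.setparents(p1, p2)
    #   repo.dirstate.endparentchange()
    #
    # New style: the context manager ends the parent change on the way
    # out, whether the body succeeds or raises.
    with repo.dirstate.parentchange():
        repo.dirstate.setparents(p1, p2)

The rest of the setparents() body (the copy-record adjustment) is unchanged apart from being re-indented under the with block; only the bracketing mechanism differs.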
@@ -1,2073 +1,2072 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 color,
31 color,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 dirstateguard,
34 dirstateguard,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 pycompat,
52 pycompat,
53 repoview,
53 repoview,
54 revset,
54 revset,
55 revsetlang,
55 revsetlang,
56 scmutil,
56 scmutil,
57 store,
57 store,
58 subrepo,
58 subrepo,
59 tags as tagsmod,
59 tags as tagsmod,
60 transaction,
60 transaction,
61 txnutil,
61 txnutil,
62 util,
62 util,
63 vfs as vfsmod,
63 vfs as vfsmod,
64 )
64 )
65
65
66 release = lockmod.release
66 release = lockmod.release
67 urlerr = util.urlerr
67 urlerr = util.urlerr
68 urlreq = util.urlreq
68 urlreq = util.urlreq
69
69
70 class repofilecache(scmutil.filecache):
70 class repofilecache(scmutil.filecache):
71 """All filecache usage on repo are done for logic that should be unfiltered
71 """All filecache usage on repo are done for logic that should be unfiltered
72 """
72 """
73
73
74 def join(self, obj, fname):
74 def join(self, obj, fname):
75 return obj.vfs.join(fname)
75 return obj.vfs.join(fname)
76 def __get__(self, repo, type=None):
76 def __get__(self, repo, type=None):
77 if repo is None:
77 if repo is None:
78 return self
78 return self
79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
80 def __set__(self, repo, value):
80 def __set__(self, repo, value):
81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
82 def __delete__(self, repo):
82 def __delete__(self, repo):
83 return super(repofilecache, self).__delete__(repo.unfiltered())
83 return super(repofilecache, self).__delete__(repo.unfiltered())
84
84
85 class storecache(repofilecache):
85 class storecache(repofilecache):
86 """filecache for files in the store"""
86 """filecache for files in the store"""
87 def join(self, obj, fname):
87 def join(self, obj, fname):
88 return obj.sjoin(fname)
88 return obj.sjoin(fname)
89
89
90 class unfilteredpropertycache(util.propertycache):
90 class unfilteredpropertycache(util.propertycache):
91 """propertycache that apply to unfiltered repo only"""
91 """propertycache that apply to unfiltered repo only"""
92
92
93 def __get__(self, repo, type=None):
93 def __get__(self, repo, type=None):
94 unfi = repo.unfiltered()
94 unfi = repo.unfiltered()
95 if unfi is repo:
95 if unfi is repo:
96 return super(unfilteredpropertycache, self).__get__(unfi)
96 return super(unfilteredpropertycache, self).__get__(unfi)
97 return getattr(unfi, self.name)
97 return getattr(unfi, self.name)
98
98
99 class filteredpropertycache(util.propertycache):
99 class filteredpropertycache(util.propertycache):
100 """propertycache that must take filtering in account"""
100 """propertycache that must take filtering in account"""
101
101
102 def cachevalue(self, obj, value):
102 def cachevalue(self, obj, value):
103 object.__setattr__(obj, self.name, value)
103 object.__setattr__(obj, self.name, value)
104
104
105
105
106 def hasunfilteredcache(repo, name):
106 def hasunfilteredcache(repo, name):
107 """check if a repo has an unfilteredpropertycache value for <name>"""
107 """check if a repo has an unfilteredpropertycache value for <name>"""
108 return name in vars(repo.unfiltered())
108 return name in vars(repo.unfiltered())
109
109
110 def unfilteredmethod(orig):
110 def unfilteredmethod(orig):
111 """decorate method that always need to be run on unfiltered version"""
111 """decorate method that always need to be run on unfiltered version"""
112 def wrapper(repo, *args, **kwargs):
112 def wrapper(repo, *args, **kwargs):
113 return orig(repo.unfiltered(), *args, **kwargs)
113 return orig(repo.unfiltered(), *args, **kwargs)
114 return wrapper
114 return wrapper
115
115
116 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
116 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
117 'unbundle'}
117 'unbundle'}
118 legacycaps = moderncaps.union({'changegroupsubset'})
118 legacycaps = moderncaps.union({'changegroupsubset'})
119
119
120 class localpeer(peer.peerrepository):
120 class localpeer(peer.peerrepository):
121 '''peer for a local repo; reflects only the most recent API'''
121 '''peer for a local repo; reflects only the most recent API'''
122
122
123 def __init__(self, repo, caps=None):
123 def __init__(self, repo, caps=None):
124 if caps is None:
124 if caps is None:
125 caps = moderncaps.copy()
125 caps = moderncaps.copy()
126 peer.peerrepository.__init__(self)
126 peer.peerrepository.__init__(self)
127 self._repo = repo.filtered('served')
127 self._repo = repo.filtered('served')
128 self.ui = repo.ui
128 self.ui = repo.ui
129 self._caps = repo._restrictcapabilities(caps)
129 self._caps = repo._restrictcapabilities(caps)
130 self.requirements = repo.requirements
130 self.requirements = repo.requirements
131 self.supportedformats = repo.supportedformats
131 self.supportedformats = repo.supportedformats
132
132
133 def close(self):
133 def close(self):
134 self._repo.close()
134 self._repo.close()
135
135
136 def _capabilities(self):
136 def _capabilities(self):
137 return self._caps
137 return self._caps
138
138
139 def local(self):
139 def local(self):
140 return self._repo
140 return self._repo
141
141
142 def canpush(self):
142 def canpush(self):
143 return True
143 return True
144
144
145 def url(self):
145 def url(self):
146 return self._repo.url()
146 return self._repo.url()
147
147
148 def lookup(self, key):
148 def lookup(self, key):
149 return self._repo.lookup(key)
149 return self._repo.lookup(key)
150
150
151 def branchmap(self):
151 def branchmap(self):
152 return self._repo.branchmap()
152 return self._repo.branchmap()
153
153
154 def heads(self):
154 def heads(self):
155 return self._repo.heads()
155 return self._repo.heads()
156
156
157 def known(self, nodes):
157 def known(self, nodes):
158 return self._repo.known(nodes)
158 return self._repo.known(nodes)
159
159
160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
161 **kwargs):
161 **kwargs):
162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
163 common=common, bundlecaps=bundlecaps,
163 common=common, bundlecaps=bundlecaps,
164 **kwargs)
164 **kwargs)
165 cb = util.chunkbuffer(chunks)
165 cb = util.chunkbuffer(chunks)
166
166
167 if exchange.bundle2requested(bundlecaps):
167 if exchange.bundle2requested(bundlecaps):
168 # When requesting a bundle2, getbundle returns a stream to make the
168 # When requesting a bundle2, getbundle returns a stream to make the
169 # wire level function happier. We need to build a proper object
169 # wire level function happier. We need to build a proper object
170 # from it in local peer.
170 # from it in local peer.
171 return bundle2.getunbundler(self.ui, cb)
171 return bundle2.getunbundler(self.ui, cb)
172 else:
172 else:
173 return changegroup.getunbundler('01', cb, None)
173 return changegroup.getunbundler('01', cb, None)
174
174
175 # TODO We might want to move the next two calls into legacypeer and add
175 # TODO We might want to move the next two calls into legacypeer and add
176 # unbundle instead.
176 # unbundle instead.
177
177
178 def unbundle(self, cg, heads, url):
178 def unbundle(self, cg, heads, url):
179 """apply a bundle on a repo
179 """apply a bundle on a repo
180
180
181 This function handles the repo locking itself."""
181 This function handles the repo locking itself."""
182 try:
182 try:
183 try:
183 try:
184 cg = exchange.readbundle(self.ui, cg, None)
184 cg = exchange.readbundle(self.ui, cg, None)
185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
186 if util.safehasattr(ret, 'getchunks'):
186 if util.safehasattr(ret, 'getchunks'):
187 # This is a bundle20 object, turn it into an unbundler.
187 # This is a bundle20 object, turn it into an unbundler.
188 # This little dance should be dropped eventually when the
188 # This little dance should be dropped eventually when the
189 # API is finally improved.
189 # API is finally improved.
190 stream = util.chunkbuffer(ret.getchunks())
190 stream = util.chunkbuffer(ret.getchunks())
191 ret = bundle2.getunbundler(self.ui, stream)
191 ret = bundle2.getunbundler(self.ui, stream)
192 return ret
192 return ret
193 except Exception as exc:
193 except Exception as exc:
194 # If the exception contains output salvaged from a bundle2
194 # If the exception contains output salvaged from a bundle2
195 # reply, we need to make sure it is printed before continuing
195 # reply, we need to make sure it is printed before continuing
196 # to fail. So we build a bundle2 with such output and consume
196 # to fail. So we build a bundle2 with such output and consume
197 # it directly.
197 # it directly.
198 #
198 #
199 # This is not very elegant but allows a "simple" solution for
199 # This is not very elegant but allows a "simple" solution for
200 # issue4594
200 # issue4594
201 output = getattr(exc, '_bundle2salvagedoutput', ())
201 output = getattr(exc, '_bundle2salvagedoutput', ())
202 if output:
202 if output:
203 bundler = bundle2.bundle20(self._repo.ui)
203 bundler = bundle2.bundle20(self._repo.ui)
204 for out in output:
204 for out in output:
205 bundler.addpart(out)
205 bundler.addpart(out)
206 stream = util.chunkbuffer(bundler.getchunks())
206 stream = util.chunkbuffer(bundler.getchunks())
207 b = bundle2.getunbundler(self.ui, stream)
207 b = bundle2.getunbundler(self.ui, stream)
208 bundle2.processbundle(self._repo, b)
208 bundle2.processbundle(self._repo, b)
209 raise
209 raise
210 except error.PushRaced as exc:
210 except error.PushRaced as exc:
211 raise error.ResponseError(_('push failed:'), str(exc))
211 raise error.ResponseError(_('push failed:'), str(exc))
212
212
213 def lock(self):
213 def lock(self):
214 return self._repo.lock()
214 return self._repo.lock()
215
215
216 def addchangegroup(self, cg, source, url):
216 def addchangegroup(self, cg, source, url):
217 return cg.apply(self._repo, source, url)
217 return cg.apply(self._repo, source, url)
218
218
219 def pushkey(self, namespace, key, old, new):
219 def pushkey(self, namespace, key, old, new):
220 return self._repo.pushkey(namespace, key, old, new)
220 return self._repo.pushkey(namespace, key, old, new)
221
221
222 def listkeys(self, namespace):
222 def listkeys(self, namespace):
223 return self._repo.listkeys(namespace)
223 return self._repo.listkeys(namespace)
224
224
225 def debugwireargs(self, one, two, three=None, four=None, five=None):
225 def debugwireargs(self, one, two, three=None, four=None, five=None):
226 '''used to test argument passing over the wire'''
226 '''used to test argument passing over the wire'''
227 return "%s %s %s %s %s" % (one, two, three, four, five)
227 return "%s %s %s %s %s" % (one, two, three, four, five)
228
228
229 class locallegacypeer(localpeer):
229 class locallegacypeer(localpeer):
230 '''peer extension which implements legacy methods too; used for tests with
230 '''peer extension which implements legacy methods too; used for tests with
231 restricted capabilities'''
231 restricted capabilities'''
232
232
233 def __init__(self, repo):
233 def __init__(self, repo):
234 localpeer.__init__(self, repo, caps=legacycaps)
234 localpeer.__init__(self, repo, caps=legacycaps)
235
235
236 def branches(self, nodes):
236 def branches(self, nodes):
237 return self._repo.branches(nodes)
237 return self._repo.branches(nodes)
238
238
239 def between(self, pairs):
239 def between(self, pairs):
240 return self._repo.between(pairs)
240 return self._repo.between(pairs)
241
241
242 def changegroup(self, basenodes, source):
242 def changegroup(self, basenodes, source):
243 return changegroup.changegroup(self._repo, basenodes, source)
243 return changegroup.changegroup(self._repo, basenodes, source)
244
244
245 def changegroupsubset(self, bases, heads, source):
245 def changegroupsubset(self, bases, heads, source):
246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
247
247
248 class localrepository(object):
248 class localrepository(object):
249
249
250 supportedformats = {
250 supportedformats = {
251 'revlogv1',
251 'revlogv1',
252 'generaldelta',
252 'generaldelta',
253 'treemanifest',
253 'treemanifest',
254 'manifestv2',
254 'manifestv2',
255 }
255 }
256 _basesupported = supportedformats | {
256 _basesupported = supportedformats | {
257 'store',
257 'store',
258 'fncache',
258 'fncache',
259 'shared',
259 'shared',
260 'relshared',
260 'relshared',
261 'dotencode',
261 'dotencode',
262 }
262 }
263 openerreqs = {
263 openerreqs = {
264 'revlogv1',
264 'revlogv1',
265 'generaldelta',
265 'generaldelta',
266 'treemanifest',
266 'treemanifest',
267 'manifestv2',
267 'manifestv2',
268 }
268 }
269 filtername = None
269 filtername = None
270
270
271 # a list of (ui, featureset) functions.
271 # a list of (ui, featureset) functions.
272 # only functions defined in module of enabled extensions are invoked
272 # only functions defined in module of enabled extensions are invoked
273 featuresetupfuncs = set()
273 featuresetupfuncs = set()
274
274
275 def __init__(self, baseui, path, create=False):
275 def __init__(self, baseui, path, create=False):
276 self.requirements = set()
276 self.requirements = set()
277 # wvfs: rooted at the repository root, used to access the working copy
277 # wvfs: rooted at the repository root, used to access the working copy
278 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
278 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
279 # vfs: rooted at .hg, used to access repo files outside of .hg/store
279 # vfs: rooted at .hg, used to access repo files outside of .hg/store
280 self.vfs = None
280 self.vfs = None
281 # svfs: usually rooted at .hg/store, used to access repository history
281 # svfs: usually rooted at .hg/store, used to access repository history
282 # If this is a shared repository, this vfs may point to another
282 # If this is a shared repository, this vfs may point to another
283 # repository's .hg/store directory.
283 # repository's .hg/store directory.
284 self.svfs = None
284 self.svfs = None
285 self.root = self.wvfs.base
285 self.root = self.wvfs.base
286 self.path = self.wvfs.join(".hg")
286 self.path = self.wvfs.join(".hg")
287 self.origroot = path
287 self.origroot = path
288 self.auditor = pathutil.pathauditor(self.root, self._checknested)
288 self.auditor = pathutil.pathauditor(self.root, self._checknested)
289 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
289 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
290 realfs=False)
290 realfs=False)
291 self.vfs = vfsmod.vfs(self.path)
291 self.vfs = vfsmod.vfs(self.path)
292 self.baseui = baseui
292 self.baseui = baseui
293 self.ui = baseui.copy()
293 self.ui = baseui.copy()
294 self.ui.copy = baseui.copy # prevent copying repo configuration
294 self.ui.copy = baseui.copy # prevent copying repo configuration
295 # A list of callback to shape the phase if no data were found.
295 # A list of callback to shape the phase if no data were found.
296 # Callback are in the form: func(repo, roots) --> processed root.
296 # Callback are in the form: func(repo, roots) --> processed root.
297 # This list it to be filled by extension during repo setup
297 # This list it to be filled by extension during repo setup
298 self._phasedefaults = []
298 self._phasedefaults = []
299 try:
299 try:
300 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
300 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
301 self._loadextensions()
301 self._loadextensions()
302 except IOError:
302 except IOError:
303 pass
303 pass
304
304
305 if self.featuresetupfuncs:
305 if self.featuresetupfuncs:
306 self.supported = set(self._basesupported) # use private copy
306 self.supported = set(self._basesupported) # use private copy
307 extmods = set(m.__name__ for n, m
307 extmods = set(m.__name__ for n, m
308 in extensions.extensions(self.ui))
308 in extensions.extensions(self.ui))
309 for setupfunc in self.featuresetupfuncs:
309 for setupfunc in self.featuresetupfuncs:
310 if setupfunc.__module__ in extmods:
310 if setupfunc.__module__ in extmods:
311 setupfunc(self.ui, self.supported)
311 setupfunc(self.ui, self.supported)
312 else:
312 else:
313 self.supported = self._basesupported
313 self.supported = self._basesupported
314 color.setup(self.ui)
314 color.setup(self.ui)
315
315
316 # Add compression engines.
316 # Add compression engines.
317 for name in util.compengines:
317 for name in util.compengines:
318 engine = util.compengines[name]
318 engine = util.compengines[name]
319 if engine.revlogheader():
319 if engine.revlogheader():
320 self.supported.add('exp-compression-%s' % name)
320 self.supported.add('exp-compression-%s' % name)
321
321
322 if not self.vfs.isdir():
322 if not self.vfs.isdir():
323 if create:
323 if create:
324 self.requirements = newreporequirements(self)
324 self.requirements = newreporequirements(self)
325
325
326 if not self.wvfs.exists():
326 if not self.wvfs.exists():
327 self.wvfs.makedirs()
327 self.wvfs.makedirs()
328 self.vfs.makedir(notindexed=True)
328 self.vfs.makedir(notindexed=True)
329
329
330 if 'store' in self.requirements:
330 if 'store' in self.requirements:
331 self.vfs.mkdir("store")
331 self.vfs.mkdir("store")
332
332
333 # create an invalid changelog
333 # create an invalid changelog
334 self.vfs.append(
334 self.vfs.append(
335 "00changelog.i",
335 "00changelog.i",
336 '\0\0\0\2' # represents revlogv2
336 '\0\0\0\2' # represents revlogv2
337 ' dummy changelog to prevent using the old repo layout'
337 ' dummy changelog to prevent using the old repo layout'
338 )
338 )
339 else:
339 else:
340 raise error.RepoError(_("repository %s not found") % path)
340 raise error.RepoError(_("repository %s not found") % path)
341 elif create:
341 elif create:
342 raise error.RepoError(_("repository %s already exists") % path)
342 raise error.RepoError(_("repository %s already exists") % path)
343 else:
343 else:
344 try:
344 try:
345 self.requirements = scmutil.readrequires(
345 self.requirements = scmutil.readrequires(
346 self.vfs, self.supported)
346 self.vfs, self.supported)
347 except IOError as inst:
347 except IOError as inst:
348 if inst.errno != errno.ENOENT:
348 if inst.errno != errno.ENOENT:
349 raise
349 raise
350
350
351 self.sharedpath = self.path
351 self.sharedpath = self.path
352 try:
352 try:
353 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
353 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
354 if 'relshared' in self.requirements:
354 if 'relshared' in self.requirements:
355 sharedpath = self.vfs.join(sharedpath)
355 sharedpath = self.vfs.join(sharedpath)
356 vfs = vfsmod.vfs(sharedpath, realpath=True)
356 vfs = vfsmod.vfs(sharedpath, realpath=True)
357 s = vfs.base
357 s = vfs.base
358 if not vfs.exists():
358 if not vfs.exists():
359 raise error.RepoError(
359 raise error.RepoError(
360 _('.hg/sharedpath points to nonexistent directory %s') % s)
360 _('.hg/sharedpath points to nonexistent directory %s') % s)
361 self.sharedpath = s
361 self.sharedpath = s
362 except IOError as inst:
362 except IOError as inst:
363 if inst.errno != errno.ENOENT:
363 if inst.errno != errno.ENOENT:
364 raise
364 raise
365
365
366 self.store = store.store(
366 self.store = store.store(
367 self.requirements, self.sharedpath, vfsmod.vfs)
367 self.requirements, self.sharedpath, vfsmod.vfs)
368 self.spath = self.store.path
368 self.spath = self.store.path
369 self.svfs = self.store.vfs
369 self.svfs = self.store.vfs
370 self.sjoin = self.store.join
370 self.sjoin = self.store.join
371 self.vfs.createmode = self.store.createmode
371 self.vfs.createmode = self.store.createmode
372 self._applyopenerreqs()
372 self._applyopenerreqs()
373 if create:
373 if create:
374 self._writerequirements()
374 self._writerequirements()
375
375
376 self._dirstatevalidatewarned = False
376 self._dirstatevalidatewarned = False
377
377
378 self._branchcaches = {}
378 self._branchcaches = {}
379 self._revbranchcache = None
379 self._revbranchcache = None
380 self.filterpats = {}
380 self.filterpats = {}
381 self._datafilters = {}
381 self._datafilters = {}
382 self._transref = self._lockref = self._wlockref = None
382 self._transref = self._lockref = self._wlockref = None
383
383
384 # A cache for various files under .hg/ that tracks file changes,
384 # A cache for various files under .hg/ that tracks file changes,
385 # (used by the filecache decorator)
385 # (used by the filecache decorator)
386 #
386 #
387 # Maps a property name to its util.filecacheentry
387 # Maps a property name to its util.filecacheentry
388 self._filecache = {}
388 self._filecache = {}
389
389
390 # hold sets of revision to be filtered
390 # hold sets of revision to be filtered
391 # should be cleared when something might have changed the filter value:
391 # should be cleared when something might have changed the filter value:
392 # - new changesets,
392 # - new changesets,
393 # - phase change,
393 # - phase change,
394 # - new obsolescence marker,
394 # - new obsolescence marker,
395 # - working directory parent change,
395 # - working directory parent change,
396 # - bookmark changes
396 # - bookmark changes
397 self.filteredrevcache = {}
397 self.filteredrevcache = {}
398
398
399 # generic mapping between names and nodes
399 # generic mapping between names and nodes
400 self.names = namespaces.namespaces()
400 self.names = namespaces.namespaces()
401
401
402 def close(self):
402 def close(self):
403 self._writecaches()
403 self._writecaches()
404
404
405 def _loadextensions(self):
405 def _loadextensions(self):
406 extensions.loadall(self.ui)
406 extensions.loadall(self.ui)
407
407
408 def _writecaches(self):
408 def _writecaches(self):
409 if self._revbranchcache:
409 if self._revbranchcache:
410 self._revbranchcache.write()
410 self._revbranchcache.write()
411
411
412 def _restrictcapabilities(self, caps):
412 def _restrictcapabilities(self, caps):
413 if self.ui.configbool('experimental', 'bundle2-advertise', True):
413 if self.ui.configbool('experimental', 'bundle2-advertise', True):
414 caps = set(caps)
414 caps = set(caps)
415 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
415 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
416 caps.add('bundle2=' + urlreq.quote(capsblob))
416 caps.add('bundle2=' + urlreq.quote(capsblob))
417 return caps
417 return caps
418
418
419 def _applyopenerreqs(self):
419 def _applyopenerreqs(self):
420 self.svfs.options = dict((r, 1) for r in self.requirements
420 self.svfs.options = dict((r, 1) for r in self.requirements
421 if r in self.openerreqs)
421 if r in self.openerreqs)
422 # experimental config: format.chunkcachesize
422 # experimental config: format.chunkcachesize
423 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
423 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
424 if chunkcachesize is not None:
424 if chunkcachesize is not None:
425 self.svfs.options['chunkcachesize'] = chunkcachesize
425 self.svfs.options['chunkcachesize'] = chunkcachesize
426 # experimental config: format.maxchainlen
426 # experimental config: format.maxchainlen
427 maxchainlen = self.ui.configint('format', 'maxchainlen')
427 maxchainlen = self.ui.configint('format', 'maxchainlen')
428 if maxchainlen is not None:
428 if maxchainlen is not None:
429 self.svfs.options['maxchainlen'] = maxchainlen
429 self.svfs.options['maxchainlen'] = maxchainlen
430 # experimental config: format.manifestcachesize
430 # experimental config: format.manifestcachesize
431 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
431 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
432 if manifestcachesize is not None:
432 if manifestcachesize is not None:
433 self.svfs.options['manifestcachesize'] = manifestcachesize
433 self.svfs.options['manifestcachesize'] = manifestcachesize
434 # experimental config: format.aggressivemergedeltas
434 # experimental config: format.aggressivemergedeltas
435 aggressivemergedeltas = self.ui.configbool('format',
435 aggressivemergedeltas = self.ui.configbool('format',
436 'aggressivemergedeltas', False)
436 'aggressivemergedeltas', False)
437 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
437 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
438 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
438 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
439
439
440 for r in self.requirements:
440 for r in self.requirements:
441 if r.startswith('exp-compression-'):
441 if r.startswith('exp-compression-'):
442 self.svfs.options['compengine'] = r[len('exp-compression-'):]
442 self.svfs.options['compengine'] = r[len('exp-compression-'):]
443
443
444 def _writerequirements(self):
444 def _writerequirements(self):
445 scmutil.writerequires(self.vfs, self.requirements)
445 scmutil.writerequires(self.vfs, self.requirements)
446
446
447 def _checknested(self, path):
447 def _checknested(self, path):
448 """Determine if path is a legal nested repository."""
448 """Determine if path is a legal nested repository."""
449 if not path.startswith(self.root):
449 if not path.startswith(self.root):
450 return False
450 return False
451 subpath = path[len(self.root) + 1:]
451 subpath = path[len(self.root) + 1:]
452 normsubpath = util.pconvert(subpath)
452 normsubpath = util.pconvert(subpath)
453
453
454 # XXX: Checking against the current working copy is wrong in
454 # XXX: Checking against the current working copy is wrong in
455 # the sense that it can reject things like
455 # the sense that it can reject things like
456 #
456 #
457 # $ hg cat -r 10 sub/x.txt
457 # $ hg cat -r 10 sub/x.txt
458 #
458 #
459 # if sub/ is no longer a subrepository in the working copy
459 # if sub/ is no longer a subrepository in the working copy
460 # parent revision.
460 # parent revision.
461 #
461 #
462 # However, it can of course also allow things that would have
462 # However, it can of course also allow things that would have
463 # been rejected before, such as the above cat command if sub/
463 # been rejected before, such as the above cat command if sub/
464 # is a subrepository now, but was a normal directory before.
464 # is a subrepository now, but was a normal directory before.
465 # The old path auditor would have rejected by mistake since it
465 # The old path auditor would have rejected by mistake since it
466 # panics when it sees sub/.hg/.
466 # panics when it sees sub/.hg/.
467 #
467 #
468 # All in all, checking against the working copy seems sensible
468 # All in all, checking against the working copy seems sensible
469 # since we want to prevent access to nested repositories on
469 # since we want to prevent access to nested repositories on
470 # the filesystem *now*.
470 # the filesystem *now*.
471 ctx = self[None]
471 ctx = self[None]
472 parts = util.splitpath(subpath)
472 parts = util.splitpath(subpath)
473 while parts:
473 while parts:
474 prefix = '/'.join(parts)
474 prefix = '/'.join(parts)
475 if prefix in ctx.substate:
475 if prefix in ctx.substate:
476 if prefix == normsubpath:
476 if prefix == normsubpath:
477 return True
477 return True
478 else:
478 else:
479 sub = ctx.sub(prefix)
479 sub = ctx.sub(prefix)
480 return sub.checknested(subpath[len(prefix) + 1:])
480 return sub.checknested(subpath[len(prefix) + 1:])
481 else:
481 else:
482 parts.pop()
482 parts.pop()
483 return False
483 return False
484
484
485 def peer(self):
485 def peer(self):
486 return localpeer(self) # not cached to avoid reference cycle
486 return localpeer(self) # not cached to avoid reference cycle
487
487
488 def unfiltered(self):
488 def unfiltered(self):
489 """Return unfiltered version of the repository
489 """Return unfiltered version of the repository
490
490
491 Intended to be overwritten by filtered repo."""
491 Intended to be overwritten by filtered repo."""
492 return self
492 return self
493
493
494 def filtered(self, name):
494 def filtered(self, name):
495 """Return a filtered version of a repository"""
495 """Return a filtered version of a repository"""
496 # build a new class with the mixin and the current class
496 # build a new class with the mixin and the current class
497 # (possibly subclass of the repo)
497 # (possibly subclass of the repo)
498 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
498 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
499 pass
499 pass
500 return filteredrepo(self, name)
500 return filteredrepo(self, name)
501
501
502 @repofilecache('bookmarks', 'bookmarks.current')
502 @repofilecache('bookmarks', 'bookmarks.current')
503 def _bookmarks(self):
503 def _bookmarks(self):
504 return bookmarks.bmstore(self)
504 return bookmarks.bmstore(self)
505
505
506 @property
506 @property
507 def _activebookmark(self):
507 def _activebookmark(self):
508 return self._bookmarks.active
508 return self._bookmarks.active
509
509
510 def bookmarkheads(self, bookmark):
510 def bookmarkheads(self, bookmark):
511 name = bookmark.split('@', 1)[0]
511 name = bookmark.split('@', 1)[0]
512 heads = []
512 heads = []
513 for mark, n in self._bookmarks.iteritems():
513 for mark, n in self._bookmarks.iteritems():
514 if mark.split('@', 1)[0] == name:
514 if mark.split('@', 1)[0] == name:
515 heads.append(n)
515 heads.append(n)
516 return heads
516 return heads
517
517
518 # _phaserevs and _phasesets depend on changelog. what we need is to
518 # _phaserevs and _phasesets depend on changelog. what we need is to
519 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
519 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
520 # can't be easily expressed in filecache mechanism.
520 # can't be easily expressed in filecache mechanism.
521 @storecache('phaseroots', '00changelog.i')
521 @storecache('phaseroots', '00changelog.i')
522 def _phasecache(self):
522 def _phasecache(self):
523 return phases.phasecache(self, self._phasedefaults)
523 return phases.phasecache(self, self._phasedefaults)
524
524
525 @storecache('obsstore')
525 @storecache('obsstore')
526 def obsstore(self):
526 def obsstore(self):
527 # read default format for new obsstore.
527 # read default format for new obsstore.
528 # developer config: format.obsstore-version
528 # developer config: format.obsstore-version
529 defaultformat = self.ui.configint('format', 'obsstore-version', None)
529 defaultformat = self.ui.configint('format', 'obsstore-version', None)
530 # rely on obsstore class default when possible.
530 # rely on obsstore class default when possible.
531 kwargs = {}
531 kwargs = {}
532 if defaultformat is not None:
532 if defaultformat is not None:
533 kwargs['defaultformat'] = defaultformat
533 kwargs['defaultformat'] = defaultformat
534 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
534 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
535 store = obsolete.obsstore(self.svfs, readonly=readonly,
535 store = obsolete.obsstore(self.svfs, readonly=readonly,
536 **kwargs)
536 **kwargs)
537 if store and readonly:
537 if store and readonly:
538 self.ui.warn(
538 self.ui.warn(
539 _('obsolete feature not enabled but %i markers found!\n')
539 _('obsolete feature not enabled but %i markers found!\n')
540 % len(list(store)))
540 % len(list(store)))
541 return store
541 return store
542
542
543 @storecache('00changelog.i')
543 @storecache('00changelog.i')
544 def changelog(self):
544 def changelog(self):
545 return changelog.changelog(self.svfs,
545 return changelog.changelog(self.svfs,
546 trypending=txnutil.mayhavepending(self.root))
546 trypending=txnutil.mayhavepending(self.root))
547
547
548 def _constructmanifest(self):
548 def _constructmanifest(self):
549 # This is a temporary function while we migrate from manifest to
549 # This is a temporary function while we migrate from manifest to
550 # manifestlog. It allows bundlerepo and unionrepo to intercept the
550 # manifestlog. It allows bundlerepo and unionrepo to intercept the
551 # manifest creation.
551 # manifest creation.
552 return manifest.manifestrevlog(self.svfs)
552 return manifest.manifestrevlog(self.svfs)
553
553
554 @storecache('00manifest.i')
554 @storecache('00manifest.i')
555 def manifestlog(self):
555 def manifestlog(self):
556 return manifest.manifestlog(self.svfs, self)
556 return manifest.manifestlog(self.svfs, self)
557
557
558 @repofilecache('dirstate')
558 @repofilecache('dirstate')
559 def dirstate(self):
559 def dirstate(self):
560 return dirstate.dirstate(self.vfs, self.ui, self.root,
560 return dirstate.dirstate(self.vfs, self.ui, self.root,
561 self._dirstatevalidate)
561 self._dirstatevalidate)
562
562
563 def _dirstatevalidate(self, node):
563 def _dirstatevalidate(self, node):
564 try:
564 try:
565 self.changelog.rev(node)
565 self.changelog.rev(node)
566 return node
566 return node
567 except error.LookupError:
567 except error.LookupError:
568 if not self._dirstatevalidatewarned:
568 if not self._dirstatevalidatewarned:
569 self._dirstatevalidatewarned = True
569 self._dirstatevalidatewarned = True
570 self.ui.warn(_("warning: ignoring unknown"
570 self.ui.warn(_("warning: ignoring unknown"
571 " working parent %s!\n") % short(node))
571 " working parent %s!\n") % short(node))
572 return nullid
572 return nullid
573
573
574 def __getitem__(self, changeid):
574 def __getitem__(self, changeid):
575 if changeid is None or changeid == wdirrev:
575 if changeid is None or changeid == wdirrev:
576 return context.workingctx(self)
576 return context.workingctx(self)
577 if isinstance(changeid, slice):
577 if isinstance(changeid, slice):
578 return [context.changectx(self, i)
578 return [context.changectx(self, i)
579 for i in xrange(*changeid.indices(len(self)))
579 for i in xrange(*changeid.indices(len(self)))
580 if i not in self.changelog.filteredrevs]
580 if i not in self.changelog.filteredrevs]
581 return context.changectx(self, changeid)
581 return context.changectx(self, changeid)
582
582
583 def __contains__(self, changeid):
583 def __contains__(self, changeid):
584 try:
584 try:
585 self[changeid]
585 self[changeid]
586 return True
586 return True
587 except error.RepoLookupError:
587 except error.RepoLookupError:
588 return False
588 return False
589
589
590 def __nonzero__(self):
590 def __nonzero__(self):
591 return True
591 return True
592
592
593 __bool__ = __nonzero__
593 __bool__ = __nonzero__
594
594
595 def __len__(self):
595 def __len__(self):
596 return len(self.changelog)
596 return len(self.changelog)
597
597
598 def __iter__(self):
598 def __iter__(self):
599 return iter(self.changelog)
599 return iter(self.changelog)
600
600
601 def revs(self, expr, *args):
601 def revs(self, expr, *args):
602 '''Find revisions matching a revset.
602 '''Find revisions matching a revset.
603
603
604 The revset is specified as a string ``expr`` that may contain
604 The revset is specified as a string ``expr`` that may contain
605 %-formatting to escape certain types. See ``revsetlang.formatspec``.
605 %-formatting to escape certain types. See ``revsetlang.formatspec``.
606
606
607 Revset aliases from the configuration are not expanded. To expand
607 Revset aliases from the configuration are not expanded. To expand
608 user aliases, consider calling ``scmutil.revrange()`` or
608 user aliases, consider calling ``scmutil.revrange()`` or
609 ``repo.anyrevs([expr], user=True)``.
609 ``repo.anyrevs([expr], user=True)``.
610
610
611 Returns a revset.abstractsmartset, which is a list-like interface
611 Returns a revset.abstractsmartset, which is a list-like interface
612 that contains integer revisions.
612 that contains integer revisions.
613 '''
613 '''
614 expr = revsetlang.formatspec(expr, *args)
614 expr = revsetlang.formatspec(expr, *args)
615 m = revset.match(None, expr)
615 m = revset.match(None, expr)
616 return m(self)
616 return m(self)
617
617
618 def set(self, expr, *args):
618 def set(self, expr, *args):
619 '''Find revisions matching a revset and emit changectx instances.
619 '''Find revisions matching a revset and emit changectx instances.
620
620
621 This is a convenience wrapper around ``revs()`` that iterates the
621 This is a convenience wrapper around ``revs()`` that iterates the
622 result and is a generator of changectx instances.
622 result and is a generator of changectx instances.
623
623
624 Revset aliases from the configuration are not expanded. To expand
624 Revset aliases from the configuration are not expanded. To expand
625 user aliases, consider calling ``scmutil.revrange()``.
625 user aliases, consider calling ``scmutil.revrange()``.
626 '''
626 '''
627 for r in self.revs(expr, *args):
627 for r in self.revs(expr, *args):
628 yield self[r]
628 yield self[r]
629
629
630 def anyrevs(self, specs, user=False):
630 def anyrevs(self, specs, user=False):
631 '''Find revisions matching one of the given revsets.
631 '''Find revisions matching one of the given revsets.
632
632
633 Revset aliases from the configuration are not expanded by default. To
633 Revset aliases from the configuration are not expanded by default. To
634 expand user aliases, specify ``user=True``.
634 expand user aliases, specify ``user=True``.
635 '''
635 '''
636 if user:
636 if user:
637 m = revset.matchany(self.ui, specs, repo=self)
637 m = revset.matchany(self.ui, specs, repo=self)
638 else:
638 else:
639 m = revset.matchany(None, specs)
639 m = revset.matchany(None, specs)
640 return m(self)
640 return m(self)
641
641
642 def url(self):
642 def url(self):
643 return 'file:' + self.root
643 return 'file:' + self.root
644
644
645 def hook(self, name, throw=False, **args):
645 def hook(self, name, throw=False, **args):
646 """Call a hook, passing this repo instance.
646 """Call a hook, passing this repo instance.
647
647
648 This a convenience method to aid invoking hooks. Extensions likely
648 This a convenience method to aid invoking hooks. Extensions likely
649 won't call this unless they have registered a custom hook or are
649 won't call this unless they have registered a custom hook or are
650 replacing code that is expected to call a hook.
650 replacing code that is expected to call a hook.
651 """
651 """
652 return hook.hook(self.ui, self, name, throw, **args)
652 return hook.hook(self.ui, self, name, throw, **args)
653
653
654 @filteredpropertycache
654 @filteredpropertycache
655 def _tagscache(self):
655 def _tagscache(self):
656 '''Returns a tagscache object that contains various tags related
656 '''Returns a tagscache object that contains various tags related
657 caches.'''
657 caches.'''
658
658
659 # This simplifies its cache management by having one decorated
659 # This simplifies its cache management by having one decorated
660 # function (this one) and the rest simply fetch things from it.
660 # function (this one) and the rest simply fetch things from it.
661 class tagscache(object):
661 class tagscache(object):
662 def __init__(self):
662 def __init__(self):
663 # These two define the set of tags for this repository. tags
663 # These two define the set of tags for this repository. tags
664 # maps tag name to node; tagtypes maps tag name to 'global' or
664 # maps tag name to node; tagtypes maps tag name to 'global' or
665 # 'local'. (Global tags are defined by .hgtags across all
665 # 'local'. (Global tags are defined by .hgtags across all
666 # heads, and local tags are defined in .hg/localtags.)
666 # heads, and local tags are defined in .hg/localtags.)
667 # They constitute the in-memory cache of tags.
667 # They constitute the in-memory cache of tags.
668 self.tags = self.tagtypes = None
668 self.tags = self.tagtypes = None
669
669
670 self.nodetagscache = self.tagslist = None
670 self.nodetagscache = self.tagslist = None
671
671
672 cache = tagscache()
672 cache = tagscache()
673 cache.tags, cache.tagtypes = self._findtags()
673 cache.tags, cache.tagtypes = self._findtags()
674
674
675 return cache
675 return cache
676
676
677 def tags(self):
677 def tags(self):
678 '''return a mapping of tag to node'''
678 '''return a mapping of tag to node'''
679 t = {}
679 t = {}
680 if self.changelog.filteredrevs:
680 if self.changelog.filteredrevs:
681 tags, tt = self._findtags()
681 tags, tt = self._findtags()
682 else:
682 else:
683 tags = self._tagscache.tags
683 tags = self._tagscache.tags
684 for k, v in tags.iteritems():
684 for k, v in tags.iteritems():
685 try:
685 try:
686 # ignore tags to unknown nodes
686 # ignore tags to unknown nodes
687 self.changelog.rev(v)
687 self.changelog.rev(v)
688 t[k] = v
688 t[k] = v
689 except (error.LookupError, ValueError):
689 except (error.LookupError, ValueError):
690 pass
690 pass
691 return t
691 return t
692
692
693 def _findtags(self):
693 def _findtags(self):
694 '''Do the hard work of finding tags. Return a pair of dicts
694 '''Do the hard work of finding tags. Return a pair of dicts
695 (tags, tagtypes) where tags maps tag name to node, and tagtypes
695 (tags, tagtypes) where tags maps tag name to node, and tagtypes
696 maps tag name to a string like \'global\' or \'local\'.
696 maps tag name to a string like \'global\' or \'local\'.
697 Subclasses or extensions are free to add their own tags, but
697 Subclasses or extensions are free to add their own tags, but
698 should be aware that the returned dicts will be retained for the
698 should be aware that the returned dicts will be retained for the
699 duration of the localrepo object.'''
699 duration of the localrepo object.'''
700
700
701 # XXX what tagtype should subclasses/extensions use? Currently
701 # XXX what tagtype should subclasses/extensions use? Currently
702 # mq and bookmarks add tags, but do not set the tagtype at all.
702 # mq and bookmarks add tags, but do not set the tagtype at all.
703 # Should each extension invent its own tag type? Should there
703 # Should each extension invent its own tag type? Should there
704 # be one tagtype for all such "virtual" tags? Or is the status
704 # be one tagtype for all such "virtual" tags? Or is the status
705 # quo fine?
705 # quo fine?
706
706
707
707
708 # map tag name to (node, hist)
708 # map tag name to (node, hist)
709 alltags = tagsmod.findglobaltags(self.ui, self)
709 alltags = tagsmod.findglobaltags(self.ui, self)
710 # map tag name to tag type
710 # map tag name to tag type
711 tagtypes = dict((tag, 'global') for tag in alltags)
711 tagtypes = dict((tag, 'global') for tag in alltags)
712
712
713 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
713 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
714
714
715 # Build the return dicts. Have to re-encode tag names because
715 # Build the return dicts. Have to re-encode tag names because
716 # the tags module always uses UTF-8 (in order not to lose info
716 # the tags module always uses UTF-8 (in order not to lose info
717 # writing to the cache), but the rest of Mercurial wants them in
717 # writing to the cache), but the rest of Mercurial wants them in
718 # local encoding.
718 # local encoding.
719 tags = {}
719 tags = {}
720 for (name, (node, hist)) in alltags.iteritems():
720 for (name, (node, hist)) in alltags.iteritems():
721 if node != nullid:
721 if node != nullid:
722 tags[encoding.tolocal(name)] = node
722 tags[encoding.tolocal(name)] = node
723 tags['tip'] = self.changelog.tip()
723 tags['tip'] = self.changelog.tip()
724 tagtypes = dict([(encoding.tolocal(name), value)
724 tagtypes = dict([(encoding.tolocal(name), value)
725 for (name, value) in tagtypes.iteritems()])
725 for (name, value) in tagtypes.iteritems()])
726 return (tags, tagtypes)
726 return (tags, tagtypes)
727
727
728 def tagtype(self, tagname):
728 def tagtype(self, tagname):
729 '''
729 '''
730 return the type of the given tag. result can be:
730 return the type of the given tag. result can be:
731
731
732 'local' : a local tag
732 'local' : a local tag
733 'global' : a global tag
733 'global' : a global tag
734 None : tag does not exist
734 None : tag does not exist
735 '''
735 '''
736
736
737 return self._tagscache.tagtypes.get(tagname)
737 return self._tagscache.tagtypes.get(tagname)
738
738
739 def tagslist(self):
739 def tagslist(self):
740 '''return a list of tags ordered by revision'''
740 '''return a list of tags ordered by revision'''
741 if not self._tagscache.tagslist:
741 if not self._tagscache.tagslist:
742 l = []
742 l = []
743 for t, n in self.tags().iteritems():
743 for t, n in self.tags().iteritems():
744 l.append((self.changelog.rev(n), t, n))
744 l.append((self.changelog.rev(n), t, n))
745 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
745 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
746
746
747 return self._tagscache.tagslist
747 return self._tagscache.tagslist
748
748
749 def nodetags(self, node):
749 def nodetags(self, node):
750 '''return the tags associated with a node'''
750 '''return the tags associated with a node'''
751 if not self._tagscache.nodetagscache:
751 if not self._tagscache.nodetagscache:
752 nodetagscache = {}
752 nodetagscache = {}
753 for t, n in self._tagscache.tags.iteritems():
753 for t, n in self._tagscache.tags.iteritems():
754 nodetagscache.setdefault(n, []).append(t)
754 nodetagscache.setdefault(n, []).append(t)
755 for tags in nodetagscache.itervalues():
755 for tags in nodetagscache.itervalues():
756 tags.sort()
756 tags.sort()
757 self._tagscache.nodetagscache = nodetagscache
757 self._tagscache.nodetagscache = nodetagscache
758 return self._tagscache.nodetagscache.get(node, [])
758 return self._tagscache.nodetagscache.get(node, [])
759
759
760 def nodebookmarks(self, node):
760 def nodebookmarks(self, node):
761 """return the list of bookmarks pointing to the specified node"""
761 """return the list of bookmarks pointing to the specified node"""
762 marks = []
762 marks = []
763 for bookmark, n in self._bookmarks.iteritems():
763 for bookmark, n in self._bookmarks.iteritems():
764 if n == node:
764 if n == node:
765 marks.append(bookmark)
765 marks.append(bookmark)
766 return sorted(marks)
766 return sorted(marks)
767
767
768 def branchmap(self):
768 def branchmap(self):
769 '''returns a dictionary {branch: [branchheads]} with branchheads
769 '''returns a dictionary {branch: [branchheads]} with branchheads
770 ordered by increasing revision number'''
770 ordered by increasing revision number'''
771 branchmap.updatecache(self)
771 branchmap.updatecache(self)
772 return self._branchcaches[self.filtername]
772 return self._branchcaches[self.filtername]
773
773
774 @unfilteredmethod
774 @unfilteredmethod
775 def revbranchcache(self):
775 def revbranchcache(self):
776 if not self._revbranchcache:
776 if not self._revbranchcache:
777 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
777 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
778 return self._revbranchcache
778 return self._revbranchcache
779
779
780 def branchtip(self, branch, ignoremissing=False):
780 def branchtip(self, branch, ignoremissing=False):
781 '''return the tip node for a given branch
781 '''return the tip node for a given branch
782
782
783 If ignoremissing is True, then this method will not raise an error.
783 If ignoremissing is True, then this method will not raise an error.
784 This is helpful for callers that only expect None for a missing branch
784 This is helpful for callers that only expect None for a missing branch
785 (e.g. namespace).
785 (e.g. namespace).
786
786
787 '''
787 '''
788 try:
788 try:
789 return self.branchmap().branchtip(branch)
789 return self.branchmap().branchtip(branch)
790 except KeyError:
790 except KeyError:
791 if not ignoremissing:
791 if not ignoremissing:
792 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
792 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
793 else:
793 else:
794 pass
794 pass
795
795
796 def lookup(self, key):
796 def lookup(self, key):
797 return self[key].node()
797 return self[key].node()
798
798
799 def lookupbranch(self, key, remote=None):
799 def lookupbranch(self, key, remote=None):
800 repo = remote or self
800 repo = remote or self
801 if key in repo.branchmap():
801 if key in repo.branchmap():
802 return key
802 return key
803
803
804 repo = (remote and remote.local()) and remote or self
804 repo = (remote and remote.local()) and remote or self
805 return repo[key].branch()
805 return repo[key].branch()
806
806
807 def known(self, nodes):
807 def known(self, nodes):
808 cl = self.changelog
808 cl = self.changelog
809 nm = cl.nodemap
809 nm = cl.nodemap
810 filtered = cl.filteredrevs
810 filtered = cl.filteredrevs
811 result = []
811 result = []
812 for n in nodes:
812 for n in nodes:
813 r = nm.get(n)
813 r = nm.get(n)
814 resp = not (r is None or r in filtered)
814 resp = not (r is None or r in filtered)
815 result.append(resp)
815 result.append(resp)
816 return result
816 return result
817
817
818 def local(self):
818 def local(self):
819 return self
819 return self
820
820
821 def publishing(self):
821 def publishing(self):
822 # it's safe (and desirable) to trust the publish flag unconditionally
822 # it's safe (and desirable) to trust the publish flag unconditionally
823 # so that we don't finalize changes shared between users via ssh or nfs
823 # so that we don't finalize changes shared between users via ssh or nfs
824 return self.ui.configbool('phases', 'publish', True, untrusted=True)
824 return self.ui.configbool('phases', 'publish', True, untrusted=True)
825
825
826 def cancopy(self):
826 def cancopy(self):
827 # so statichttprepo's override of local() works
827 # so statichttprepo's override of local() works
828 if not self.local():
828 if not self.local():
829 return False
829 return False
830 if not self.publishing():
830 if not self.publishing():
831 return True
831 return True
832 # if publishing we can't copy if there is filtered content
832 # if publishing we can't copy if there is filtered content
833 return not self.filtered('visible').changelog.filteredrevs
833 return not self.filtered('visible').changelog.filteredrevs
834
834
835 def shared(self):
835 def shared(self):
836 '''the type of shared repository (None if not shared)'''
836 '''the type of shared repository (None if not shared)'''
837 if self.sharedpath != self.path:
837 if self.sharedpath != self.path:
838 return 'store'
838 return 'store'
839 return None
839 return None
840
840
841 def wjoin(self, f, *insidef):
841 def wjoin(self, f, *insidef):
842 return self.vfs.reljoin(self.root, f, *insidef)
842 return self.vfs.reljoin(self.root, f, *insidef)
843
843
844 def file(self, f):
844 def file(self, f):
845 if f[0] == '/':
845 if f[0] == '/':
846 f = f[1:]
846 f = f[1:]
847 return filelog.filelog(self.svfs, f)
847 return filelog.filelog(self.svfs, f)
848
848
849 def changectx(self, changeid):
849 def changectx(self, changeid):
850 return self[changeid]
850 return self[changeid]
851
851
852 def setparents(self, p1, p2=nullid):
852 def setparents(self, p1, p2=nullid):
853 - self.dirstate.beginparentchange()
853 + with self.dirstate.parentchange():
854 copies = self.dirstate.setparents(p1, p2)
854 copies = self.dirstate.setparents(p1, p2)
855 pctx = self[p1]
855 pctx = self[p1]
856 if copies:
856 if copies:
857 # Adjust copy records, the dirstate cannot do it, it
857 # Adjust copy records, the dirstate cannot do it, it
858 # requires access to parents manifests. Preserve them
858 # requires access to parents manifests. Preserve them
859 # only for entries added to first parent.
859 # only for entries added to first parent.
860 for f in copies:
860 for f in copies:
861 if f not in pctx and copies[f] in pctx:
861 if f not in pctx and copies[f] in pctx:
862 self.dirstate.copy(copies[f], f)
862 self.dirstate.copy(copies[f], f)
863 if p2 == nullid:
863 if p2 == nullid:
864 for f, s in sorted(self.dirstate.copies().items()):
864 for f, s in sorted(self.dirstate.copies().items()):
865 if f not in pctx and s not in pctx:
865 if f not in pctx and s not in pctx:
866 self.dirstate.copy(None, f)
866 self.dirstate.copy(None, f)
867 self.dirstate.endparentchange()
868
867
869 def filectx(self, path, changeid=None, fileid=None):
868 def filectx(self, path, changeid=None, fileid=None):
870 """changeid can be a changeset revision, node, or tag.
869 """changeid can be a changeset revision, node, or tag.
871 fileid can be a file revision or node."""
870 fileid can be a file revision or node."""
872 return context.filectx(self, path, changeid, fileid)
871 return context.filectx(self, path, changeid, fileid)
873
872
874 def getcwd(self):
873 def getcwd(self):
875 return self.dirstate.getcwd()
874 return self.dirstate.getcwd()
876
875
877 def pathto(self, f, cwd=None):
876 def pathto(self, f, cwd=None):
878 return self.dirstate.pathto(f, cwd)
877 return self.dirstate.pathto(f, cwd)
879
878
    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

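    # Illustrative sketch, not part of this module: _loadfilter() consumes
    # (pattern, command) pairs from the [encode] and [decode] hgrc sections,
    # so a configuration such as
    #
    #     [encode]
    #     **.txt = dos2unix
    #
    # would (assuming a 'dos2unix' executable on PATH) pipe every tracked
    # '*.txt' file through that command, via util.filter, when it is read
    # from the working directory.
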
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

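    # Illustrative sketch, not part of this module: 'flags' is the manifest
    # flag string of the file, so callers materializing special files might
    # do, e.g.
    #
    #     repo.wwrite('some-link', 'target', 'l')   # write a symlink
    #     repo.wwrite('run.sh', data, 'x')          # set the exec bit
    #
    # where an empty flag string produces a regular file. The file names
    # here are hypothetical.
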
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with cases where a transaction
        # exposes new heads without a changegroup being involved (eg: phase
        # movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
                                             False)
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at transaction
                # close if tr.addfilegenerator (via dirstate.write or
                # so) isn't invoked while the transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)
        tr.changes['revs'] = set()

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

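    # Illustrative sketches, not part of this module, based on the behavior
    # documented above:
    #
    # 1) transaction() requires the store lock, so the usual calling pattern
    #    is roughly
    #
    #        with repo.lock():
    #            tr = repo.transaction('some-operation')
    #            try:
    #                ...            # write to the store
    #                tr.close()
    #            finally:
    #                tr.release()
    #
    # 2) with experimental.hook-track-tags enabled, an external txnclose
    #    hook could read the recorded tag movements along the lines of
    #
    #        if os.environ.get('HG_TAG_MOVED'):
    #            with open('.hg/changes/tags.changes') as fp:
    #                for line in fp:
    #                    action, node, name = line.rstrip('\n').split(' ', 2)
    #
    #    following the "<action> <hex-node> <tag-name>" line format described
    #    in the comment above.
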
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

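    # Illustrative note (an assumption about undoname(), which is defined
    # elsewhere in this file): undofiles() maps each journal file to its
    # post-transaction counterpart, e.g. 'journal.dirstate' ->
    # 'undo.dirstate', which is what _rollback() below restores from.
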
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

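    # Illustrative sketch, not part of this module: a dry run only reports
    # what would be rolled back, e.g.
    #
    #     if repo.rollback(dryrun=True) == 0:
    #         ...   # message printed, nothing was undone yet
    #
    # matching the early 'return 0' above, before any state is touched.
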
    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

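    # Illustrative sketch, not part of this module: commit() below relies on
    # this to defer its 'commit' hook, roughly
    #
    #     repo._afterlock(lambda: repo.hook('commit', node=node))
    #
    # so the hook only fires once the outermost lock has been released.
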
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

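    # Illustrative sketch, not part of this module: per the docstrings above,
    # the documented ordering is wlock before lock, so callers needing both
    # typically do
    #
    #     with repo.wlock():
    #         with repo.lock():
    #             ...   # modify both .hg and .hg/store
    #
    # Acquiring them in the opposite order trips the devel 'check-locks'
    # warning emitted in wlock() above.
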
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

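    # Illustrative example derived from the logic above: when a rename is
    # recorded, the new filelog revision carries copy metadata of the form
    #
    #     meta = {'copy': 'foo', 'copyrev': '<40-hex filelog node of foo>'}
    #
    # and its first parent is set to nullid, which tells readers to look up
    # the copy source instead ('foo' is a hypothetical source path).
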
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

1572 @unfilteredmethod
1571 @unfilteredmethod
1573 def commit(self, text="", user=None, date=None, match=None, force=False,
1572 def commit(self, text="", user=None, date=None, match=None, force=False,
1574 editor=False, extra=None):
1573 editor=False, extra=None):
1575 """Add a new revision to current repository.
1574 """Add a new revision to current repository.
1576
1575
1577 Revision information is gathered from the working directory,
1576 Revision information is gathered from the working directory,
1578 match can be used to filter the committed files. If editor is
1577 match can be used to filter the committed files. If editor is
1579 supplied, it is called to get a commit message.
1578 supplied, it is called to get a commit message.
1580 """
1579 """
1581 if extra is None:
1580 if extra is None:
1582 extra = {}
1581 extra = {}
1583
1582
1584 def fail(f, msg):
1583 def fail(f, msg):
1585 raise error.Abort('%s: %s' % (f, msg))
1584 raise error.Abort('%s: %s' % (f, msg))
1586
1585
1587 if not match:
1586 if not match:
1588 match = matchmod.always(self.root, '')
1587 match = matchmod.always(self.root, '')
1589
1588
1590 if not force:
1589 if not force:
1591 vdirs = []
1590 vdirs = []
1592 match.explicitdir = vdirs.append
1591 match.explicitdir = vdirs.append
1593 match.bad = fail
1592 match.bad = fail
1594
1593
1595 wlock = lock = tr = None
1594 wlock = lock = tr = None
1596 try:
1595 try:
1597 wlock = self.wlock()
1596 wlock = self.wlock()
1598 lock = self.lock() # for recent changelog (see issue4368)
1597 lock = self.lock() # for recent changelog (see issue4368)
1599
1598
1600 wctx = self[None]
1599 wctx = self[None]
1601 merge = len(wctx.parents()) > 1
1600 merge = len(wctx.parents()) > 1
1602
1601
1603 if not force and merge and not match.always():
1602 if not force and merge and not match.always():
1604 raise error.Abort(_('cannot partially commit a merge '
1603 raise error.Abort(_('cannot partially commit a merge '
1605 '(do not specify files or patterns)'))
1604 '(do not specify files or patterns)'))
1606
1605
1607 status = self.status(match=match, clean=force)
1606 status = self.status(match=match, clean=force)
1608 if force:
1607 if force:
1609 status.modified.extend(status.clean) # mq may commit clean files
1608 status.modified.extend(status.clean) # mq may commit clean files
1610
1609
1611 # check subrepos
1610 # check subrepos
1612 subs = []
1611 subs = []
1613 commitsubs = set()
1612 commitsubs = set()
1614 newstate = wctx.substate.copy()
1613 newstate = wctx.substate.copy()
1615 # only manage subrepos and .hgsubstate if .hgsub is present
1614 # only manage subrepos and .hgsubstate if .hgsub is present
1616 if '.hgsub' in wctx:
1615 if '.hgsub' in wctx:
1617 # we'll decide whether to track this ourselves, thanks
1616 # we'll decide whether to track this ourselves, thanks
1618 for c in status.modified, status.added, status.removed:
1617 for c in status.modified, status.added, status.removed:
1619 if '.hgsubstate' in c:
1618 if '.hgsubstate' in c:
1620 c.remove('.hgsubstate')
1619 c.remove('.hgsubstate')
1621
1620
1622 # compare current state to last committed state
1621 # compare current state to last committed state
1623 # build new substate based on last committed state
1622 # build new substate based on last committed state
1624 oldstate = wctx.p1().substate
1623 oldstate = wctx.p1().substate
1625 for s in sorted(newstate.keys()):
1624 for s in sorted(newstate.keys()):
1626 if not match(s):
1625 if not match(s):
1627 # ignore working copy, use old state if present
1626 # ignore working copy, use old state if present
1628 if s in oldstate:
1627 if s in oldstate:
1629 newstate[s] = oldstate[s]
1628 newstate[s] = oldstate[s]
1630 continue
1629 continue
1631 if not force:
1630 if not force:
1632 raise error.Abort(
1631 raise error.Abort(
1633 _("commit with new subrepo %s excluded") % s)
1632 _("commit with new subrepo %s excluded") % s)
1634 dirtyreason = wctx.sub(s).dirtyreason(True)
1633 dirtyreason = wctx.sub(s).dirtyreason(True)
1635 if dirtyreason:
1634 if dirtyreason:
1636 if not self.ui.configbool('ui', 'commitsubrepos'):
1635 if not self.ui.configbool('ui', 'commitsubrepos'):
1637 raise error.Abort(dirtyreason,
1636 raise error.Abort(dirtyreason,
1638 hint=_("use --subrepos for recursive commit"))
1637 hint=_("use --subrepos for recursive commit"))
1639 subs.append(s)
1638 subs.append(s)
1640 commitsubs.add(s)
1639 commitsubs.add(s)
1641 else:
1640 else:
1642 bs = wctx.sub(s).basestate()
1641 bs = wctx.sub(s).basestate()
1643 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1642 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1644 if oldstate.get(s, (None, None, None))[1] != bs:
1643 if oldstate.get(s, (None, None, None))[1] != bs:
1645 subs.append(s)
1644 subs.append(s)
1646
1645
1647 # check for removed subrepos
1646 # check for removed subrepos
1648 for p in wctx.parents():
1647 for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

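    # Editorial note (not part of upstream localrepo.py): the commit path
    # above fires the 'precommit' hook before the transaction is opened and
    # commitctx() fires 'pretxncommit' before it closes, so either hook can
    # veto the commit. A minimal, hypothetical hgrc wiring for such a check:
    #
    #   [hooks]
    #   pretxncommit.check = python:myhooks.checkcommit
    #
    # where myhooks.checkcommit would be a function taking (ui, repo, node,
    # **kwargs) and returning a truthy value to abort the transaction.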
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit to its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

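    # Editorial sketch (not part of upstream localrepo.py): commitctx() is
    # normally fed the workingcommitctx built by commit() above, but it can
    # also be driven with an in-memory context. A rough example, with names
    # and argument order recalled from this era of the API (verify against
    # context.py before relying on it):
    #
    #   from mercurial import context
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'new contents\n')
    #
    #   mctx = context.memctx(repo, (repo['.'].node(), None),
    #                         'example commit', ['a.txt'], getfilectx,
    #                         user='test')
    #   node = repo.commitctx(mctx)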
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

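    # Editorial sketch (not part of upstream localrepo.py): walk() and
    # status() are thin wrappers around the corresponding context methods.
    # A hypothetical use from a debug script or extension:
    #
    #   from mercurial import match as matchmod
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo.walk(m):            # working-directory files
    #       repo.ui.write('%s\n' % f)
    #   st = repo.status('.', None, match=m)
    #   repo.ui.write('modified: %r\n' % st.modified)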
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

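    # Editorial sketch (not part of upstream localrepo.py): a caller asking
    # for the heads of the current branch, newest first, might do:
    #
    #   for node in repo.branchheads():
    #       repo.ui.write('%s\n' % hex(node))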
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

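    # Editorial note (not part of upstream localrepo.py): between() walks
    # first parents from each 'top' towards 'bottom' and keeps the nodes
    # whose distance from 'top' is a power of two (1, 2, 4, 8, ...), so each
    # chain is sampled at logarithmically spaced points. This is what the
    # legacy 'between' wire-protocol command uses for discovery with old
    # clients.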
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing) before changesets are pushed.
        """
        return util.hooks()

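    # Editorial sketch (not part of upstream localrepo.py): an extension
    # would typically register a pre-push check from reposetup(), roughly:
    #
    #   def checkoutgoing(pushop):
    #       if len(pushop.outgoing.missing) > 100:
    #           raise error.Abort('refusing to push more than 100 changesets')
    #
    #   def reposetup(ui, repo):
    #       if repo.local():
    #           repo.prepushoutgoinghooks.add('myext', checkoutgoing)
    #
    # The single-pushop calling convention matches this era of the API;
    # older releases passed (repo, remote, outgoing) separately.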
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

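    # Editorial note (not part of upstream localrepo.py): the stock pushkey
    # namespaces registered in pushkey.py include 'bookmarks', 'phases',
    # 'namespaces' and 'obsolete'; keys and values are plain strings. A
    # hypothetical direct call moving a bookmark:
    #
    #   repo.pushkey('bookmarks', 'feature', '', hex(newnode))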
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
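
# Editorial sketch (not part of upstream localrepo.py): as the docstring of
# newreporequirements() says, extensions can wrap it to add requirements of
# their own. A hypothetical extension module:
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       if repo.ui.configbool('myext', 'enable'):
#           reqs.add('exp-myext-format')
#       return reqs
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)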