transaction: run _writejournal unfiltered...
marmoute
r32452:b647b923 default
@@ -1,2065 +1,2066 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repoview,
    revset,
    revsetlang,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
71 """All filecache usage on repo are done for logic that should be unfiltered
71 """All filecache usage on repo are done for logic that should be unfiltered
72 """
72 """

    def join(self, obj, fname):
        return obj.vfs.join(fname)
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
91 """propertycache that apply to unfiltered repo only"""
91 """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
100 """propertycache that must take filtering in account"""
100 """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
111 """decorate method that always need to be run on unfiltered version"""
111 """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

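# Editor's sketch (not part of upstream localrepo.py): a minimal, hypothetical
# stand-in showing what the decorator above buys you. Only ``unfilteredmethod``
# is real here; ``_demorepo`` is invented for illustration.
#
#     class _demorepo(object):
#         filtername = 'visible'
#         def unfiltered(self):
#             unfi = _demorepo.__new__(_demorepo)
#             unfi.filtername = None
#             return unfi
#         @unfilteredmethod
#         def whoami(self):
#             return self.filtername
#
#     _demorepo().whoami()  # -> None: the call was rerouted to unfiltered()
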
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

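    # Editor's note (illustrative, not API documentation): which unbundler a
    # caller gets back from getbundle() is decided purely by ``bundlecaps``:
    #
    #     peer.getbundle('pull', bundlecaps={'HG20'})  # bundle2 unbundler
    #     peer.getbundle('pull')                       # '01' changegroup unpacker
    #
    # 'HG20' is the bundle2 marker that exchange.bundle2requested() looks for.
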
    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

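# Editor's note (sketch): the two local peer classes differ only in the
# capability set they advertise; both sets are then passed through
# _restrictcapabilities(), which may append a 'bundle2=...' blob:
#
#     localpeer(repo)        # starts from moderncaps
#     locallegacypeer(repo)  # moderncaps plus 'changegroupsubset'
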
class localrepository(object):

    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = vfsmod.vfs(self.path)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, vfsmod.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

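    # Editor's sketch: the knobs read by _applyopenerreqs() map onto an hgrc
    # section like the following (values are illustrative, not advice):
    #
    #     [format]
    #     chunkcachesize = 65536   # revlog insists on a power of two
    #     maxchainlen = 1000
    #     aggressivemergedeltas = yes
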
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

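    # Editor's note on _checknested(): for a path like <root>/sub/repo/f the
    # loop probes ctx.substate with 'sub/repo/f', then 'sub/repo', then 'sub';
    # on the first subrepo prefix hit it either accepts an exact match or
    # delegates the remainder, e.g. ctx.sub('sub/repo').checknested('f').
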
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class filteredrepo(repoview.repoview, self.unfiltered().__class__):
            pass
        return filteredrepo(self, name)

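    # Editor's sketch (hypothetical session): the dynamic subclass built in
    # filtered() puts the view in front of the original class, so
    #
    #     >>> type(repo.filtered('served')).__mro__[:2]
    #     (<class 'filteredrepo'>, <class 'mercurial.repoview.repoview'>)
    #
    # repoview attribute lookups win, everything else falls through.
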
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. What we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

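    # Editor's sketch of the %-formatting mentioned in revs() above:
    #
    #     repo.revs('branch(%s) and head()', 'default')  # %s: quoted string
    #     repo.revs('%ld and not public()', revlist)     # %ld: list of ints
    #
    # revsetlang.formatspec() does the escaping, so callers never splice raw
    # user input into the expression themselves.
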
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self)
        else:
            m = revset.matchany(None, specs)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

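    # Editor's note: tags() bypasses _tagscache whenever filteredrevs is
    # non-empty because the cached result may still reference hidden
    # changesets; the LookupError/ValueError guard above then drops any tag
    # whose node is unknown to the (filtered) changelog.
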
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

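    # Editor's note on the copy handling in setparents(): a dirstate record
    # src -> dst survives only when dst is absent from the new p1 while src
    # exists there (i.e. the rename sits on top of p1); when p2 is dropped,
    # records with both src and dst missing from p1 are cleared so that
    # 'hg status' stops reporting stale renames.
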
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

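    # Editor's sketch: _loadfilter() consumes hgrc sections of the form
    #
    #     [encode]
    #     **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    # where the left-hand side is a file pattern and the right-hand side is
    # either a filter registered via adddatafilter() or a shell command handed
    # to util.filter() ('pipe:' by default, 'tempfile:' as shown).
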
895 def _filter(self, filterpats, filename, data):
895 def _filter(self, filterpats, filename, data):
896 for mf, fn, cmd in filterpats:
896 for mf, fn, cmd in filterpats:
897 if mf(filename):
897 if mf(filename):
898 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
898 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
899 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
899 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
900 break
900 break
901
901
902 return data
902 return data
903
903
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

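    # Example (a minimal sketch; the paths and payloads are hypothetical):
    # ``flags`` is a string of manifest flags -- '' for a regular file,
    # 'l' for a symlink, 'x' for an executable file.
    #
    #   n = repo.wwrite('docs/readme.txt', 'hello\n', '')   # plain file
    #   repo.wwrite('bin/run', '#!/bin/sh\n', 'x')          # executable
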
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new, changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
                                             False)
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)
        tr.changes['revs'] = set()

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

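    # Example (a minimal sketch mirroring the idiom used by commit() below;
    # ``repo`` and the transaction name 'example' are hypothetical): callers
    # must hold the store lock before opening a transaction, close it on
    # success, and let release() roll it back otherwise.
    #
    #   lock = tr = None
    #   try:
    #       lock = repo.lock()
    #       tr = repo.transaction('example')
    #       ...                    # write to the store via tr
    #       tr.close()             # commit the transaction
    #   finally:
    #       lockmod.release(tr, lock)
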
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

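    # Example (illustrative): the lock wait timeout read above comes from
    # the standard ui.timeout setting, e.g. in an hgrc:
    #
    #   [ui]
    #   # wait at most 600 seconds for another process to release the lock
    #   timeout = 600
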
    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

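    # Example (a minimal sketch of the documented ordering; the operation
    # body is hypothetical): acquire wlock before lock, and release in the
    # reverse order.
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       ...  # modify the working directory and the store
    #   finally:
    #       release(lock, wlock)
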
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

1563 @unfilteredmethod
1564 @unfilteredmethod
1564 def commit(self, text="", user=None, date=None, match=None, force=False,
1565 def commit(self, text="", user=None, date=None, match=None, force=False,
1565 editor=False, extra=None):
1566 editor=False, extra=None):
1566 """Add a new revision to current repository.
1567 """Add a new revision to current repository.
1567
1568
1568 Revision information is gathered from the working directory,
1569 Revision information is gathered from the working directory,
1569 match can be used to filter the committed files. If editor is
1570 match can be used to filter the committed files. If editor is
1570 supplied, it is called to get a commit message.
1571 supplied, it is called to get a commit message.
1571 """
1572 """
1572 if extra is None:
1573 if extra is None:
1573 extra = {}
1574 extra = {}
1574
1575
1575 def fail(f, msg):
1576 def fail(f, msg):
1576 raise error.Abort('%s: %s' % (f, msg))
1577 raise error.Abort('%s: %s' % (f, msg))
1577
1578
1578 if not match:
1579 if not match:
1579 match = matchmod.always(self.root, '')
1580 match = matchmod.always(self.root, '')
1580
1581
1581 if not force:
1582 if not force:
1582 vdirs = []
1583 vdirs = []
1583 match.explicitdir = vdirs.append
1584 match.explicitdir = vdirs.append
1584 match.bad = fail
1585 match.bad = fail
1585
1586
1586 wlock = lock = tr = None
1587 wlock = lock = tr = None
1587 try:
1588 try:
1588 wlock = self.wlock()
1589 wlock = self.wlock()
1589 lock = self.lock() # for recent changelog (see issue4368)
1590 lock = self.lock() # for recent changelog (see issue4368)
1590
1591
1591 wctx = self[None]
1592 wctx = self[None]
1592 merge = len(wctx.parents()) > 1
1593 merge = len(wctx.parents()) > 1
1593
1594
1594 if not force and merge and not match.always():
1595 if not force and merge and not match.always():
1595 raise error.Abort(_('cannot partially commit a merge '
1596 raise error.Abort(_('cannot partially commit a merge '
1596 '(do not specify files or patterns)'))
1597 '(do not specify files or patterns)'))
1597
1598
1598 status = self.status(match=match, clean=force)
1599 status = self.status(match=match, clean=force)
1599 if force:
1600 if force:
1600 status.modified.extend(status.clean) # mq may commit clean files
1601 status.modified.extend(status.clean) # mq may commit clean files
1601
1602
1602 # check subrepos
1603 # check subrepos
1603 subs = []
1604 subs = []
1604 commitsubs = set()
1605 commitsubs = set()
1605 newstate = wctx.substate.copy()
1606 newstate = wctx.substate.copy()
1606 # only manage subrepos and .hgsubstate if .hgsub is present
1607 # only manage subrepos and .hgsubstate if .hgsub is present
1607 if '.hgsub' in wctx:
1608 if '.hgsub' in wctx:
1608 # we'll decide whether to track this ourselves, thanks
1609 # we'll decide whether to track this ourselves, thanks
1609 for c in status.modified, status.added, status.removed:
1610 for c in status.modified, status.added, status.removed:
1610 if '.hgsubstate' in c:
1611 if '.hgsubstate' in c:
1611 c.remove('.hgsubstate')
1612 c.remove('.hgsubstate')
1612
1613
1613 # compare current state to last committed state
1614 # compare current state to last committed state
1614 # build new substate based on last committed state
1615 # build new substate based on last committed state
1615 oldstate = wctx.p1().substate
1616 oldstate = wctx.p1().substate
1616 for s in sorted(newstate.keys()):
1617 for s in sorted(newstate.keys()):
1617 if not match(s):
1618 if not match(s):
1618 # ignore working copy, use old state if present
1619 # ignore working copy, use old state if present
1619 if s in oldstate:
1620 if s in oldstate:
1620 newstate[s] = oldstate[s]
1621 newstate[s] = oldstate[s]
1621 continue
1622 continue
1622 if not force:
1623 if not force:
1623 raise error.Abort(
1624 raise error.Abort(
1624 _("commit with new subrepo %s excluded") % s)
1625 _("commit with new subrepo %s excluded") % s)
1625 dirtyreason = wctx.sub(s).dirtyreason(True)
1626 dirtyreason = wctx.sub(s).dirtyreason(True)
1626 if dirtyreason:
1627 if dirtyreason:
1627 if not self.ui.configbool('ui', 'commitsubrepos'):
1628 if not self.ui.configbool('ui', 'commitsubrepos'):
1628 raise error.Abort(dirtyreason,
1629 raise error.Abort(dirtyreason,
1629 hint=_("use --subrepos for recursive commit"))
1630 hint=_("use --subrepos for recursive commit"))
1630 subs.append(s)
1631 subs.append(s)
1631 commitsubs.add(s)
1632 commitsubs.add(s)
1632 else:
1633 else:
1633 bs = wctx.sub(s).basestate()
1634 bs = wctx.sub(s).basestate()
1634 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1635 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1635 if oldstate.get(s, (None, None, None))[1] != bs:
1636 if oldstate.get(s, (None, None, None))[1] != bs:
1636 subs.append(s)
1637 subs.append(s)
1637
1638
1638 # check for removed subrepos
1639 # check for removed subrepos
1639 for p in wctx.parents():
1640 for p in wctx.parents():
1640 r = [s for s in p.substate if s not in newstate]
1641 r = [s for s in p.substate if s not in newstate]
1641 subs += [s for s in r if match(s)]
1642 subs += [s for s in r if match(s)]
1642 if subs:
1643 if subs:
1643 if (not match('.hgsub') and
1644 if (not match('.hgsub') and
1644 '.hgsub' in (wctx.modified() + wctx.added())):
1645 '.hgsub' in (wctx.modified() + wctx.added())):
1645 raise error.Abort(
1646 raise error.Abort(
1646 _("can't commit subrepos without .hgsub"))
1647 _("can't commit subrepos without .hgsub"))
1647 status.modified.insert(0, '.hgsubstate')
1648 status.modified.insert(0, '.hgsubstate')
1648
1649
1649 elif '.hgsub' in status.removed:
1650 elif '.hgsub' in status.removed:
1650 # clean up .hgsubstate when .hgsub is removed
1651 # clean up .hgsubstate when .hgsub is removed
1651 if ('.hgsubstate' in wctx and
1652 if ('.hgsubstate' in wctx and
1652 '.hgsubstate' not in (status.modified + status.added +
1653 '.hgsubstate' not in (status.modified + status.added +
1653 status.removed)):
1654 status.removed)):
1654 status.removed.insert(0, '.hgsubstate')
1655 status.removed.insert(0, '.hgsubstate')
1655
1656
1656 # make sure all explicit patterns are matched
1657 # make sure all explicit patterns are matched
1657 if not force:
1658 if not force:
1658 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1659 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1659
1660
1660 cctx = context.workingcommitctx(self, status,
1661 cctx = context.workingcommitctx(self, status,
1661 text, user, date, extra)
1662 text, user, date, extra)
1662
1663
1663 # internal config: ui.allowemptycommit
1664 # internal config: ui.allowemptycommit
1664 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1665 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1665 or extra.get('close') or merge or cctx.files()
1666 or extra.get('close') or merge or cctx.files()
1666 or self.ui.configbool('ui', 'allowemptycommit'))
1667 or self.ui.configbool('ui', 'allowemptycommit'))
1667 if not allowemptycommit:
1668 if not allowemptycommit:
1668 return None
1669 return None
1669
1670
1670 if merge and cctx.deleted():
1671 if merge and cctx.deleted():
1671 raise error.Abort(_("cannot commit merge with missing files"))
1672 raise error.Abort(_("cannot commit merge with missing files"))
1672
1673
1673 ms = mergemod.mergestate.read(self)
1674 ms = mergemod.mergestate.read(self)
1674 mergeutil.checkunresolved(ms)
1675 mergeutil.checkunresolved(ms)
1675
1676
1676 if editor:
1677 if editor:
1677 cctx._text = editor(self, cctx, subs)
1678 cctx._text = editor(self, cctx, subs)
1678 edited = (text != cctx._text)
1679 edited = (text != cctx._text)
1679
1680
1680 # Save commit message in case this transaction gets rolled back
1681 # Save commit message in case this transaction gets rolled back
1681 # (e.g. by a pretxncommit hook). Leave the content alone on
1682 # (e.g. by a pretxncommit hook). Leave the content alone on
1682 # the assumption that the user will use the same editor again.
1683 # the assumption that the user will use the same editor again.
1683 msgfn = self.savecommitmessage(cctx._text)
1684 msgfn = self.savecommitmessage(cctx._text)
1684
1685
1685 # commit subs and write new state
1686 # commit subs and write new state
1686 if subs:
1687 if subs:
1687 for s in sorted(commitsubs):
1688 for s in sorted(commitsubs):
1688 sub = wctx.sub(s)
1689 sub = wctx.sub(s)
1689 self.ui.status(_('committing subrepository %s\n') %
1690 self.ui.status(_('committing subrepository %s\n') %
1690 subrepo.subrelpath(sub))
1691 subrepo.subrelpath(sub))
1691 sr = sub.commit(cctx._text, user, date)
1692 sr = sub.commit(cctx._text, user, date)
1692 newstate[s] = (newstate[s][0], sr)
1693 newstate[s] = (newstate[s][0], sr)
1693 subrepo.writestate(self, newstate)
1694 subrepo.writestate(self, newstate)
1694
1695
1695 p1, p2 = self.dirstate.parents()
1696 p1, p2 = self.dirstate.parents()
1696 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1697 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1697 try:
1698 try:
1698 self.hook("precommit", throw=True, parent1=hookp1,
1699 self.hook("precommit", throw=True, parent1=hookp1,
1699 parent2=hookp2)
1700 parent2=hookp2)
1700 tr = self.transaction('commit')
1701 tr = self.transaction('commit')
1701 ret = self.commitctx(cctx, True)
1702 ret = self.commitctx(cctx, True)
1702 except: # re-raises
1703 except: # re-raises
1703 if edited:
1704 if edited:
1704 self.ui.write(
1705 self.ui.write(
1705 _('note: commit message saved in %s\n') % msgfn)
1706 _('note: commit message saved in %s\n') % msgfn)
1706 raise
1707 raise
1707 # update bookmarks, dirstate and mergestate
1708 # update bookmarks, dirstate and mergestate
1708 bookmarks.update(self, [p1, p2], ret)
1709 bookmarks.update(self, [p1, p2], ret)
1709 cctx.markcommitted(ret)
1710 cctx.markcommitted(ret)
1710 ms.reset()
1711 ms.reset()
1711 tr.close()
1712 tr.close()
1712
1713
1713 finally:
1714 finally:
1714 lockmod.release(tr, lock, wlock)
1715 lockmod.release(tr, lock, wlock)
1715
1716
        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the
            # time the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

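    # Editor's note: a minimal illustrative sketch (not part of the original
    # module) of driving commitctx() through the in-memory commit API in
    # context.py; the exact memctx/memfilectx signatures vary between
    # Mercurial versions, so treat the details below as assumptions:
    #
    #   from mercurial import context
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'new content\n')
    #   mctx = context.memctx(repo, (repo['.'].node(), None), 'message',
    #                         ['a.txt'], getfilectx, user='alice')
    #   node = repo.commitctx(mctx)
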
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

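    # Editor's note: a hedged sketch of the replacement API referred to by
    # the deprecation warning above; matchmod is already imported by this
    # module ('match as matchmod') and matchmod.always(root, cwd) matches
    # every file:
    #
    #   m = matchmod.always(repo.root, '')
    #   for f in repo['.'].walk(m):
    #       repo.ui.write(f + '\n')
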
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

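    # Editor's note: an illustrative call; with no arguments this compares
    # the working directory against its first parent and returns a status
    # object with modified/added/removed/deleted/unknown/ignored/clean
    # fields:
    #
    #   st = repo.status()
    #   for f in st.modified:
    #       repo.ui.write('M %s\n' % f)
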
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

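    # Editor's note: for example, branchheads('default') yields the open
    # heads of the 'default' branch, newest first; closed=True also includes
    # heads closed with 'hg commit --close-branch':
    #
    #   for node in repo.branchheads('default', closed=True):
    #       repo.ui.write(hex(node) + '\n')
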
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

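    # Editor's note on the loop above: a node is recorded only when its
    # first-parent distance i from 'top' reaches the next power of two
    # (i == f, after which f doubles), so each returned list samples
    # ancestors at distances 1, 2, 4, 8, ... from 'top', stopping at
    # 'bottom' or the null revision. For a linear chain
    # top -> a -> b -> c -> d -> ..., the list for (top, bottom) starts
    # [a, b, d, h, ...].
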
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose registered functions are called
        with a pushop (carrying repo, remote and outgoing) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

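    # Editor's note: a hedged usage sketch; pushkey namespaces (e.g.
    # 'bookmarks', 'phases') are defined in mercurial.pushkey. Creating a
    # bookmark could look like the following, where the empty old value
    # asserts that the key did not exist before:
    #
    #   ok = repo.pushkey('bookmarks', 'feature', '', hex(newnode))
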
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

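    # Editor's note: for instance, repo.listkeys('bookmarks') returns a
    # dict mapping bookmark names to hex changeset ids, and
    # repo.listkeys('namespaces') enumerates the pushkey namespaces this
    # repository supports.
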
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

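    # Editor's note: as the code above shows, the text lands in
    # .hg/last-message.txt and the return value is that path relative to
    # the current working directory, e.g.:
    #
    #   msgfn = repo.savecommitmessage('WIP: draft message\n')
    #   repo.ui.write('saved in %s\n' % msgfn)
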
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

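# Editor's note: undoname() simply maps a journal file to its undo
# counterpart by replacing the first 'journal' in the basename, e.g.
# undoname('.hg/store/journal') == '.hg/store/undo' and
# undoname('.hg/store/journal.phaseroots') == '.hg/store/undo.phaseroots'.
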
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
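
# Editor's note: a minimal, assumption-laden sketch of an extension wrapping
# newreporequirements() to add its own requirement; 'exp-myfeature' is a
# made-up name for illustration:
#
#   from mercurial import extensions, localrepo
#
#   def _newreporeqs(orig, repo):
#       reqs = orig(repo)
#       reqs.add('exp-myfeature')
#       return reqs
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporeqs)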