localrepo: improve vfs documentation...
Ryan McElroy
r31536:48b9c9ca default
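The changeset below touches only the comments in localrepo.__init__ that describe the repository's three vfs objects (old/new lines 263-270 in the hunk): wvfs is rooted at the repository root and accesses the working copy, vfs is rooted at .hg and accesses repo files outside of .hg/store, and svfs is usually rooted at .hg/store and accesses repository history (in a shared repository it may point at another repository's .hg/store). A minimal sketch of what those roots mean in practice, assuming the Python 2-era API of this release and a non-shared local repository in the current directory (the join() calls exist on all three vfs objects; the resulting paths are only illustrative):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')   # open a local repository

    # wvfs: rooted at the repository root -> working copy files
    repo.wvfs.join('README')             # <root>/README

    # vfs: rooted at .hg -> repo files outside of .hg/store
    repo.vfs.join('hgrc')                # <root>/.hg/hgrc

    # svfs: usually rooted at .hg/store -> repository history
    repo.svfs.join('00changelog.i')      # <root>/.hg/store/00changelog.i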
@@ -1,2085 +1,2087
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 color,
31 color,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 dirstateguard,
34 dirstateguard,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 pycompat,
52 pycompat,
53 repoview,
53 repoview,
54 revset,
54 revset,
55 revsetlang,
55 revsetlang,
56 scmutil,
56 scmutil,
57 store,
57 store,
58 subrepo,
58 subrepo,
59 tags as tagsmod,
59 tags as tagsmod,
60 transaction,
60 transaction,
61 txnutil,
61 txnutil,
62 util,
62 util,
63 vfs as vfsmod,
63 vfs as vfsmod,
64 )
64 )
65
65
66 release = lockmod.release
66 release = lockmod.release
67 urlerr = util.urlerr
67 urlerr = util.urlerr
68 urlreq = util.urlreq
68 urlreq = util.urlreq
69
69
70 class repofilecache(scmutil.filecache):
70 class repofilecache(scmutil.filecache):
71 """All filecache usage on repo are done for logic that should be unfiltered
71 """All filecache usage on repo are done for logic that should be unfiltered
72 """
72 """
73
73
74 def join(self, obj, fname):
74 def join(self, obj, fname):
75 return obj.vfs.join(fname)
75 return obj.vfs.join(fname)
76 def __get__(self, repo, type=None):
76 def __get__(self, repo, type=None):
77 if repo is None:
77 if repo is None:
78 return self
78 return self
79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
80 def __set__(self, repo, value):
80 def __set__(self, repo, value):
81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
82 def __delete__(self, repo):
82 def __delete__(self, repo):
83 return super(repofilecache, self).__delete__(repo.unfiltered())
83 return super(repofilecache, self).__delete__(repo.unfiltered())
84
84
85 class storecache(repofilecache):
85 class storecache(repofilecache):
86 """filecache for files in the store"""
86 """filecache for files in the store"""
87 def join(self, obj, fname):
87 def join(self, obj, fname):
88 return obj.sjoin(fname)
88 return obj.sjoin(fname)
89
89
90 class unfilteredpropertycache(util.propertycache):
90 class unfilteredpropertycache(util.propertycache):
91 """propertycache that apply to unfiltered repo only"""
91 """propertycache that apply to unfiltered repo only"""
92
92
93 def __get__(self, repo, type=None):
93 def __get__(self, repo, type=None):
94 unfi = repo.unfiltered()
94 unfi = repo.unfiltered()
95 if unfi is repo:
95 if unfi is repo:
96 return super(unfilteredpropertycache, self).__get__(unfi)
96 return super(unfilteredpropertycache, self).__get__(unfi)
97 return getattr(unfi, self.name)
97 return getattr(unfi, self.name)
98
98
99 class filteredpropertycache(util.propertycache):
99 class filteredpropertycache(util.propertycache):
100 """propertycache that must take filtering in account"""
100 """propertycache that must take filtering in account"""
101
101
102 def cachevalue(self, obj, value):
102 def cachevalue(self, obj, value):
103 object.__setattr__(obj, self.name, value)
103 object.__setattr__(obj, self.name, value)
104
104
105
105
106 def hasunfilteredcache(repo, name):
106 def hasunfilteredcache(repo, name):
107 """check if a repo has an unfilteredpropertycache value for <name>"""
107 """check if a repo has an unfilteredpropertycache value for <name>"""
108 return name in vars(repo.unfiltered())
108 return name in vars(repo.unfiltered())
109
109
110 def unfilteredmethod(orig):
110 def unfilteredmethod(orig):
111 """decorate method that always need to be run on unfiltered version"""
111 """decorate method that always need to be run on unfiltered version"""
112 def wrapper(repo, *args, **kwargs):
112 def wrapper(repo, *args, **kwargs):
113 return orig(repo.unfiltered(), *args, **kwargs)
113 return orig(repo.unfiltered(), *args, **kwargs)
114 return wrapper
114 return wrapper
115
115
116 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
116 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
117 'unbundle'))
117 'unbundle'))
118 legacycaps = moderncaps.union(set(['changegroupsubset']))
118 legacycaps = moderncaps.union(set(['changegroupsubset']))
119
119
120 class localpeer(peer.peerrepository):
120 class localpeer(peer.peerrepository):
121 '''peer for a local repo; reflects only the most recent API'''
121 '''peer for a local repo; reflects only the most recent API'''
122
122
123 def __init__(self, repo, caps=None):
123 def __init__(self, repo, caps=None):
124 if caps is None:
124 if caps is None:
125 caps = moderncaps.copy()
125 caps = moderncaps.copy()
126 peer.peerrepository.__init__(self)
126 peer.peerrepository.__init__(self)
127 self._repo = repo.filtered('served')
127 self._repo = repo.filtered('served')
128 self.ui = repo.ui
128 self.ui = repo.ui
129 self._caps = repo._restrictcapabilities(caps)
129 self._caps = repo._restrictcapabilities(caps)
130 self.requirements = repo.requirements
130 self.requirements = repo.requirements
131 self.supportedformats = repo.supportedformats
131 self.supportedformats = repo.supportedformats
132
132
133 def close(self):
133 def close(self):
134 self._repo.close()
134 self._repo.close()
135
135
136 def _capabilities(self):
136 def _capabilities(self):
137 return self._caps
137 return self._caps
138
138
139 def local(self):
139 def local(self):
140 return self._repo
140 return self._repo
141
141
142 def canpush(self):
142 def canpush(self):
143 return True
143 return True
144
144
145 def url(self):
145 def url(self):
146 return self._repo.url()
146 return self._repo.url()
147
147
148 def lookup(self, key):
148 def lookup(self, key):
149 return self._repo.lookup(key)
149 return self._repo.lookup(key)
150
150
151 def branchmap(self):
151 def branchmap(self):
152 return self._repo.branchmap()
152 return self._repo.branchmap()
153
153
154 def heads(self):
154 def heads(self):
155 return self._repo.heads()
155 return self._repo.heads()
156
156
157 def known(self, nodes):
157 def known(self, nodes):
158 return self._repo.known(nodes)
158 return self._repo.known(nodes)
159
159
160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
161 **kwargs):
161 **kwargs):
162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
163 common=common, bundlecaps=bundlecaps,
163 common=common, bundlecaps=bundlecaps,
164 **kwargs)
164 **kwargs)
165 cb = util.chunkbuffer(chunks)
165 cb = util.chunkbuffer(chunks)
166
166
167 if bundlecaps is not None and 'HG20' in bundlecaps:
167 if bundlecaps is not None and 'HG20' in bundlecaps:
168 # When requesting a bundle2, getbundle returns a stream to make the
168 # When requesting a bundle2, getbundle returns a stream to make the
169 # wire level function happier. We need to build a proper object
169 # wire level function happier. We need to build a proper object
170 # from it in local peer.
170 # from it in local peer.
171 return bundle2.getunbundler(self.ui, cb)
171 return bundle2.getunbundler(self.ui, cb)
172 else:
172 else:
173 return changegroup.getunbundler('01', cb, None)
173 return changegroup.getunbundler('01', cb, None)
174
174
175 # TODO We might want to move the next two calls into legacypeer and add
175 # TODO We might want to move the next two calls into legacypeer and add
176 # unbundle instead.
176 # unbundle instead.
177
177
178 def unbundle(self, cg, heads, url):
178 def unbundle(self, cg, heads, url):
179 """apply a bundle on a repo
179 """apply a bundle on a repo
180
180
181 This function handles the repo locking itself."""
181 This function handles the repo locking itself."""
182 try:
182 try:
183 try:
183 try:
184 cg = exchange.readbundle(self.ui, cg, None)
184 cg = exchange.readbundle(self.ui, cg, None)
185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
186 if util.safehasattr(ret, 'getchunks'):
186 if util.safehasattr(ret, 'getchunks'):
187 # This is a bundle20 object, turn it into an unbundler.
187 # This is a bundle20 object, turn it into an unbundler.
188 # This little dance should be dropped eventually when the
188 # This little dance should be dropped eventually when the
189 # API is finally improved.
189 # API is finally improved.
190 stream = util.chunkbuffer(ret.getchunks())
190 stream = util.chunkbuffer(ret.getchunks())
191 ret = bundle2.getunbundler(self.ui, stream)
191 ret = bundle2.getunbundler(self.ui, stream)
192 return ret
192 return ret
193 except Exception as exc:
193 except Exception as exc:
194 # If the exception contains output salvaged from a bundle2
194 # If the exception contains output salvaged from a bundle2
195 # reply, we need to make sure it is printed before continuing
195 # reply, we need to make sure it is printed before continuing
196 # to fail. So we build a bundle2 with such output and consume
196 # to fail. So we build a bundle2 with such output and consume
197 # it directly.
197 # it directly.
198 #
198 #
199 # This is not very elegant but allows a "simple" solution for
199 # This is not very elegant but allows a "simple" solution for
200 # issue4594
200 # issue4594
201 output = getattr(exc, '_bundle2salvagedoutput', ())
201 output = getattr(exc, '_bundle2salvagedoutput', ())
202 if output:
202 if output:
203 bundler = bundle2.bundle20(self._repo.ui)
203 bundler = bundle2.bundle20(self._repo.ui)
204 for out in output:
204 for out in output:
205 bundler.addpart(out)
205 bundler.addpart(out)
206 stream = util.chunkbuffer(bundler.getchunks())
206 stream = util.chunkbuffer(bundler.getchunks())
207 b = bundle2.getunbundler(self.ui, stream)
207 b = bundle2.getunbundler(self.ui, stream)
208 bundle2.processbundle(self._repo, b)
208 bundle2.processbundle(self._repo, b)
209 raise
209 raise
210 except error.PushRaced as exc:
210 except error.PushRaced as exc:
211 raise error.ResponseError(_('push failed:'), str(exc))
211 raise error.ResponseError(_('push failed:'), str(exc))
212
212
213 def lock(self):
213 def lock(self):
214 return self._repo.lock()
214 return self._repo.lock()
215
215
216 def addchangegroup(self, cg, source, url):
216 def addchangegroup(self, cg, source, url):
217 return cg.apply(self._repo, source, url)
217 return cg.apply(self._repo, source, url)
218
218
219 def pushkey(self, namespace, key, old, new):
219 def pushkey(self, namespace, key, old, new):
220 return self._repo.pushkey(namespace, key, old, new)
220 return self._repo.pushkey(namespace, key, old, new)
221
221
222 def listkeys(self, namespace):
222 def listkeys(self, namespace):
223 return self._repo.listkeys(namespace)
223 return self._repo.listkeys(namespace)
224
224
225 def debugwireargs(self, one, two, three=None, four=None, five=None):
225 def debugwireargs(self, one, two, three=None, four=None, five=None):
226 '''used to test argument passing over the wire'''
226 '''used to test argument passing over the wire'''
227 return "%s %s %s %s %s" % (one, two, three, four, five)
227 return "%s %s %s %s %s" % (one, two, three, four, five)
228
228
229 class locallegacypeer(localpeer):
229 class locallegacypeer(localpeer):
230 '''peer extension which implements legacy methods too; used for tests with
230 '''peer extension which implements legacy methods too; used for tests with
231 restricted capabilities'''
231 restricted capabilities'''
232
232
233 def __init__(self, repo):
233 def __init__(self, repo):
234 localpeer.__init__(self, repo, caps=legacycaps)
234 localpeer.__init__(self, repo, caps=legacycaps)
235
235
236 def branches(self, nodes):
236 def branches(self, nodes):
237 return self._repo.branches(nodes)
237 return self._repo.branches(nodes)
238
238
239 def between(self, pairs):
239 def between(self, pairs):
240 return self._repo.between(pairs)
240 return self._repo.between(pairs)
241
241
242 def changegroup(self, basenodes, source):
242 def changegroup(self, basenodes, source):
243 return changegroup.changegroup(self._repo, basenodes, source)
243 return changegroup.changegroup(self._repo, basenodes, source)
244
244
245 def changegroupsubset(self, bases, heads, source):
245 def changegroupsubset(self, bases, heads, source):
246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
247
247
248 class localrepository(object):
248 class localrepository(object):
249
249
250 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
250 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
251 'manifestv2'))
251 'manifestv2'))
252 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
252 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
253 'relshared', 'dotencode'))
253 'relshared', 'dotencode'))
254 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
254 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
255 filtername = None
255 filtername = None
256
256
257 # a list of (ui, featureset) functions.
257 # a list of (ui, featureset) functions.
258 # only functions defined in modules of enabled extensions are invoked
258 # only functions defined in modules of enabled extensions are invoked
259 featuresetupfuncs = set()
259 featuresetupfuncs = set()
260
260
261 def __init__(self, baseui, path, create=False):
261 def __init__(self, baseui, path, create=False):
262 self.requirements = set()
262 self.requirements = set()
263 # vfs to access the working copy
263 # wvfs: rooted at the repository root, used to access the working copy
264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
265 # vfs to access the content of the repository
265 # vfs: rooted at .hg, used to access repo files outside of .hg/store
266 self.vfs = None
266 self.vfs = None
267 # vfs to access the store part of the repository
267 # svfs: usually rooted at .hg/store, used to access repository history
268 # If this is a shared repository, this vfs may point to another
269 # repository's .hg/store directory.
268 self.svfs = None
270 self.svfs = None
269 self.root = self.wvfs.base
271 self.root = self.wvfs.base
270 self.path = self.wvfs.join(".hg")
272 self.path = self.wvfs.join(".hg")
271 self.origroot = path
273 self.origroot = path
272 self.auditor = pathutil.pathauditor(self.root, self._checknested)
274 self.auditor = pathutil.pathauditor(self.root, self._checknested)
273 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
275 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
274 realfs=False)
276 realfs=False)
275 self.vfs = vfsmod.vfs(self.path)
277 self.vfs = vfsmod.vfs(self.path)
276 self.baseui = baseui
278 self.baseui = baseui
277 self.ui = baseui.copy()
279 self.ui = baseui.copy()
278 self.ui.copy = baseui.copy # prevent copying repo configuration
280 self.ui.copy = baseui.copy # prevent copying repo configuration
279 # A list of callbacks to shape the phase if no data were found.
281 # A list of callbacks to shape the phase if no data were found.
280 # Callbacks are in the form: func(repo, roots) --> processed root.
282 # Callbacks are in the form: func(repo, roots) --> processed root.
281 # This list is to be filled by extensions during repo setup
283 # This list is to be filled by extensions during repo setup
282 self._phasedefaults = []
284 self._phasedefaults = []
283 try:
285 try:
284 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
286 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
285 self._loadextensions()
287 self._loadextensions()
286 except IOError:
288 except IOError:
287 pass
289 pass
288
290
289 if self.featuresetupfuncs:
291 if self.featuresetupfuncs:
290 self.supported = set(self._basesupported) # use private copy
292 self.supported = set(self._basesupported) # use private copy
291 extmods = set(m.__name__ for n, m
293 extmods = set(m.__name__ for n, m
292 in extensions.extensions(self.ui))
294 in extensions.extensions(self.ui))
293 for setupfunc in self.featuresetupfuncs:
295 for setupfunc in self.featuresetupfuncs:
294 if setupfunc.__module__ in extmods:
296 if setupfunc.__module__ in extmods:
295 setupfunc(self.ui, self.supported)
297 setupfunc(self.ui, self.supported)
296 else:
298 else:
297 self.supported = self._basesupported
299 self.supported = self._basesupported
298 color.setup(self.ui)
300 color.setup(self.ui)
299
301
300 # Add compression engines.
302 # Add compression engines.
301 for name in util.compengines:
303 for name in util.compengines:
302 engine = util.compengines[name]
304 engine = util.compengines[name]
303 if engine.revlogheader():
305 if engine.revlogheader():
304 self.supported.add('exp-compression-%s' % name)
306 self.supported.add('exp-compression-%s' % name)
305
307
306 if not self.vfs.isdir():
308 if not self.vfs.isdir():
307 if create:
309 if create:
308 self.requirements = newreporequirements(self)
310 self.requirements = newreporequirements(self)
309
311
310 if not self.wvfs.exists():
312 if not self.wvfs.exists():
311 self.wvfs.makedirs()
313 self.wvfs.makedirs()
312 self.vfs.makedir(notindexed=True)
314 self.vfs.makedir(notindexed=True)
313
315
314 if 'store' in self.requirements:
316 if 'store' in self.requirements:
315 self.vfs.mkdir("store")
317 self.vfs.mkdir("store")
316
318
317 # create an invalid changelog
319 # create an invalid changelog
318 self.vfs.append(
320 self.vfs.append(
319 "00changelog.i",
321 "00changelog.i",
320 '\0\0\0\2' # represents revlogv2
322 '\0\0\0\2' # represents revlogv2
321 ' dummy changelog to prevent using the old repo layout'
323 ' dummy changelog to prevent using the old repo layout'
322 )
324 )
323 else:
325 else:
324 raise error.RepoError(_("repository %s not found") % path)
326 raise error.RepoError(_("repository %s not found") % path)
325 elif create:
327 elif create:
326 raise error.RepoError(_("repository %s already exists") % path)
328 raise error.RepoError(_("repository %s already exists") % path)
327 else:
329 else:
328 try:
330 try:
329 self.requirements = scmutil.readrequires(
331 self.requirements = scmutil.readrequires(
330 self.vfs, self.supported)
332 self.vfs, self.supported)
331 except IOError as inst:
333 except IOError as inst:
332 if inst.errno != errno.ENOENT:
334 if inst.errno != errno.ENOENT:
333 raise
335 raise
334
336
335 self.sharedpath = self.path
337 self.sharedpath = self.path
336 try:
338 try:
337 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
339 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
338 if 'relshared' in self.requirements:
340 if 'relshared' in self.requirements:
339 sharedpath = self.vfs.join(sharedpath)
341 sharedpath = self.vfs.join(sharedpath)
340 vfs = vfsmod.vfs(sharedpath, realpath=True)
342 vfs = vfsmod.vfs(sharedpath, realpath=True)
341 s = vfs.base
343 s = vfs.base
342 if not vfs.exists():
344 if not vfs.exists():
343 raise error.RepoError(
345 raise error.RepoError(
344 _('.hg/sharedpath points to nonexistent directory %s') % s)
346 _('.hg/sharedpath points to nonexistent directory %s') % s)
345 self.sharedpath = s
347 self.sharedpath = s
346 except IOError as inst:
348 except IOError as inst:
347 if inst.errno != errno.ENOENT:
349 if inst.errno != errno.ENOENT:
348 raise
350 raise
349
351
350 self.store = store.store(
352 self.store = store.store(
351 self.requirements, self.sharedpath, vfsmod.vfs)
353 self.requirements, self.sharedpath, vfsmod.vfs)
352 self.spath = self.store.path
354 self.spath = self.store.path
353 self.svfs = self.store.vfs
355 self.svfs = self.store.vfs
354 self.sjoin = self.store.join
356 self.sjoin = self.store.join
355 self.vfs.createmode = self.store.createmode
357 self.vfs.createmode = self.store.createmode
356 self._applyopenerreqs()
358 self._applyopenerreqs()
357 if create:
359 if create:
358 self._writerequirements()
360 self._writerequirements()
359
361
360 self._dirstatevalidatewarned = False
362 self._dirstatevalidatewarned = False
361
363
362 self._branchcaches = {}
364 self._branchcaches = {}
363 self._revbranchcache = None
365 self._revbranchcache = None
364 self.filterpats = {}
366 self.filterpats = {}
365 self._datafilters = {}
367 self._datafilters = {}
366 self._transref = self._lockref = self._wlockref = None
368 self._transref = self._lockref = self._wlockref = None
367
369
368 # A cache for various files under .hg/ that tracks file changes,
370 # A cache for various files under .hg/ that tracks file changes,
369 # (used by the filecache decorator)
371 # (used by the filecache decorator)
370 #
372 #
371 # Maps a property name to its util.filecacheentry
373 # Maps a property name to its util.filecacheentry
372 self._filecache = {}
374 self._filecache = {}
373
375
374 # holds sets of revisions to be filtered
376 # holds sets of revisions to be filtered
375 # should be cleared when something might have changed the filter value:
377 # should be cleared when something might have changed the filter value:
376 # - new changesets,
378 # - new changesets,
377 # - phase change,
379 # - phase change,
378 # - new obsolescence marker,
380 # - new obsolescence marker,
379 # - working directory parent change,
381 # - working directory parent change,
380 # - bookmark changes
382 # - bookmark changes
381 self.filteredrevcache = {}
383 self.filteredrevcache = {}
382
384
383 # generic mapping between names and nodes
385 # generic mapping between names and nodes
384 self.names = namespaces.namespaces()
386 self.names = namespaces.namespaces()
385
387
386 @property
388 @property
387 def wopener(self):
389 def wopener(self):
388 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
390 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
389 return self.wvfs
391 return self.wvfs
390
392
391 @property
393 @property
392 def opener(self):
394 def opener(self):
393 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
395 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
394 return self.vfs
396 return self.vfs
395
397
396 def close(self):
398 def close(self):
397 self._writecaches()
399 self._writecaches()
398
400
399 def _loadextensions(self):
401 def _loadextensions(self):
400 extensions.loadall(self.ui)
402 extensions.loadall(self.ui)
401
403
402 def _writecaches(self):
404 def _writecaches(self):
403 if self._revbranchcache:
405 if self._revbranchcache:
404 self._revbranchcache.write()
406 self._revbranchcache.write()
405
407
406 def _restrictcapabilities(self, caps):
408 def _restrictcapabilities(self, caps):
407 if self.ui.configbool('experimental', 'bundle2-advertise', True):
409 if self.ui.configbool('experimental', 'bundle2-advertise', True):
408 caps = set(caps)
410 caps = set(caps)
409 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
411 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
410 caps.add('bundle2=' + urlreq.quote(capsblob))
412 caps.add('bundle2=' + urlreq.quote(capsblob))
411 return caps
413 return caps
412
414
413 def _applyopenerreqs(self):
415 def _applyopenerreqs(self):
414 self.svfs.options = dict((r, 1) for r in self.requirements
416 self.svfs.options = dict((r, 1) for r in self.requirements
415 if r in self.openerreqs)
417 if r in self.openerreqs)
416 # experimental config: format.chunkcachesize
418 # experimental config: format.chunkcachesize
417 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
419 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
418 if chunkcachesize is not None:
420 if chunkcachesize is not None:
419 self.svfs.options['chunkcachesize'] = chunkcachesize
421 self.svfs.options['chunkcachesize'] = chunkcachesize
420 # experimental config: format.maxchainlen
422 # experimental config: format.maxchainlen
421 maxchainlen = self.ui.configint('format', 'maxchainlen')
423 maxchainlen = self.ui.configint('format', 'maxchainlen')
422 if maxchainlen is not None:
424 if maxchainlen is not None:
423 self.svfs.options['maxchainlen'] = maxchainlen
425 self.svfs.options['maxchainlen'] = maxchainlen
424 # experimental config: format.manifestcachesize
426 # experimental config: format.manifestcachesize
425 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
427 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
426 if manifestcachesize is not None:
428 if manifestcachesize is not None:
427 self.svfs.options['manifestcachesize'] = manifestcachesize
429 self.svfs.options['manifestcachesize'] = manifestcachesize
428 # experimental config: format.aggressivemergedeltas
430 # experimental config: format.aggressivemergedeltas
429 aggressivemergedeltas = self.ui.configbool('format',
431 aggressivemergedeltas = self.ui.configbool('format',
430 'aggressivemergedeltas', False)
432 'aggressivemergedeltas', False)
431 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
433 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
432 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
434 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
433
435
434 for r in self.requirements:
436 for r in self.requirements:
435 if r.startswith('exp-compression-'):
437 if r.startswith('exp-compression-'):
436 self.svfs.options['compengine'] = r[len('exp-compression-'):]
438 self.svfs.options['compengine'] = r[len('exp-compression-'):]
437
439
438 def _writerequirements(self):
440 def _writerequirements(self):
439 scmutil.writerequires(self.vfs, self.requirements)
441 scmutil.writerequires(self.vfs, self.requirements)
440
442
441 def _checknested(self, path):
443 def _checknested(self, path):
442 """Determine if path is a legal nested repository."""
444 """Determine if path is a legal nested repository."""
443 if not path.startswith(self.root):
445 if not path.startswith(self.root):
444 return False
446 return False
445 subpath = path[len(self.root) + 1:]
447 subpath = path[len(self.root) + 1:]
446 normsubpath = util.pconvert(subpath)
448 normsubpath = util.pconvert(subpath)
447
449
448 # XXX: Checking against the current working copy is wrong in
450 # XXX: Checking against the current working copy is wrong in
449 # the sense that it can reject things like
451 # the sense that it can reject things like
450 #
452 #
451 # $ hg cat -r 10 sub/x.txt
453 # $ hg cat -r 10 sub/x.txt
452 #
454 #
453 # if sub/ is no longer a subrepository in the working copy
455 # if sub/ is no longer a subrepository in the working copy
454 # parent revision.
456 # parent revision.
455 #
457 #
456 # However, it can of course also allow things that would have
458 # However, it can of course also allow things that would have
457 # been rejected before, such as the above cat command if sub/
459 # been rejected before, such as the above cat command if sub/
458 # is a subrepository now, but was a normal directory before.
460 # is a subrepository now, but was a normal directory before.
459 # The old path auditor would have rejected by mistake since it
461 # The old path auditor would have rejected by mistake since it
460 # panics when it sees sub/.hg/.
462 # panics when it sees sub/.hg/.
461 #
463 #
462 # All in all, checking against the working copy seems sensible
464 # All in all, checking against the working copy seems sensible
463 # since we want to prevent access to nested repositories on
465 # since we want to prevent access to nested repositories on
464 # the filesystem *now*.
466 # the filesystem *now*.
465 ctx = self[None]
467 ctx = self[None]
466 parts = util.splitpath(subpath)
468 parts = util.splitpath(subpath)
467 while parts:
469 while parts:
468 prefix = '/'.join(parts)
470 prefix = '/'.join(parts)
469 if prefix in ctx.substate:
471 if prefix in ctx.substate:
470 if prefix == normsubpath:
472 if prefix == normsubpath:
471 return True
473 return True
472 else:
474 else:
473 sub = ctx.sub(prefix)
475 sub = ctx.sub(prefix)
474 return sub.checknested(subpath[len(prefix) + 1:])
476 return sub.checknested(subpath[len(prefix) + 1:])
475 else:
477 else:
476 parts.pop()
478 parts.pop()
477 return False
479 return False
478
480
479 def peer(self):
481 def peer(self):
480 return localpeer(self) # not cached to avoid reference cycle
482 return localpeer(self) # not cached to avoid reference cycle
481
483
482 def unfiltered(self):
484 def unfiltered(self):
483 """Return unfiltered version of the repository
485 """Return unfiltered version of the repository
484
486
485 Intended to be overwritten by filtered repo."""
487 Intended to be overwritten by filtered repo."""
486 return self
488 return self
487
489
488 def filtered(self, name):
490 def filtered(self, name):
489 """Return a filtered version of a repository"""
491 """Return a filtered version of a repository"""
490 # build a new class with the mixin and the current class
492 # build a new class with the mixin and the current class
491 # (possibly subclass of the repo)
493 # (possibly subclass of the repo)
492 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
494 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
493 pass
495 pass
494 return filteredrepo(self, name)
496 return filteredrepo(self, name)
495
497
496 @repofilecache('bookmarks', 'bookmarks.current')
498 @repofilecache('bookmarks', 'bookmarks.current')
497 def _bookmarks(self):
499 def _bookmarks(self):
498 return bookmarks.bmstore(self)
500 return bookmarks.bmstore(self)
499
501
500 @property
502 @property
501 def _activebookmark(self):
503 def _activebookmark(self):
502 return self._bookmarks.active
504 return self._bookmarks.active
503
505
504 def bookmarkheads(self, bookmark):
506 def bookmarkheads(self, bookmark):
505 name = bookmark.split('@', 1)[0]
507 name = bookmark.split('@', 1)[0]
506 heads = []
508 heads = []
507 for mark, n in self._bookmarks.iteritems():
509 for mark, n in self._bookmarks.iteritems():
508 if mark.split('@', 1)[0] == name:
510 if mark.split('@', 1)[0] == name:
509 heads.append(n)
511 heads.append(n)
510 return heads
512 return heads
511
513
512 # _phaserevs and _phasesets depend on changelog. what we need is to
514 # _phaserevs and _phasesets depend on changelog. what we need is to
513 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
515 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
514 # can't be easily expressed in filecache mechanism.
516 # can't be easily expressed in filecache mechanism.
515 @storecache('phaseroots', '00changelog.i')
517 @storecache('phaseroots', '00changelog.i')
516 def _phasecache(self):
518 def _phasecache(self):
517 return phases.phasecache(self, self._phasedefaults)
519 return phases.phasecache(self, self._phasedefaults)
518
520
519 @storecache('obsstore')
521 @storecache('obsstore')
520 def obsstore(self):
522 def obsstore(self):
521 # read default format for new obsstore.
523 # read default format for new obsstore.
522 # developer config: format.obsstore-version
524 # developer config: format.obsstore-version
523 defaultformat = self.ui.configint('format', 'obsstore-version', None)
525 defaultformat = self.ui.configint('format', 'obsstore-version', None)
524 # rely on obsstore class default when possible.
526 # rely on obsstore class default when possible.
525 kwargs = {}
527 kwargs = {}
526 if defaultformat is not None:
528 if defaultformat is not None:
527 kwargs['defaultformat'] = defaultformat
529 kwargs['defaultformat'] = defaultformat
528 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
530 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
529 store = obsolete.obsstore(self.svfs, readonly=readonly,
531 store = obsolete.obsstore(self.svfs, readonly=readonly,
530 **kwargs)
532 **kwargs)
531 if store and readonly:
533 if store and readonly:
532 self.ui.warn(
534 self.ui.warn(
533 _('obsolete feature not enabled but %i markers found!\n')
535 _('obsolete feature not enabled but %i markers found!\n')
534 % len(list(store)))
536 % len(list(store)))
535 return store
537 return store
536
538
537 @storecache('00changelog.i')
539 @storecache('00changelog.i')
538 def changelog(self):
540 def changelog(self):
539 c = changelog.changelog(self.svfs)
541 c = changelog.changelog(self.svfs)
540 if txnutil.mayhavepending(self.root):
542 if txnutil.mayhavepending(self.root):
541 c.readpending('00changelog.i.a')
543 c.readpending('00changelog.i.a')
542 return c
544 return c
543
545
544 def _constructmanifest(self):
546 def _constructmanifest(self):
545 # This is a temporary function while we migrate from manifest to
547 # This is a temporary function while we migrate from manifest to
546 # manifestlog. It allows bundlerepo and unionrepo to intercept the
548 # manifestlog. It allows bundlerepo and unionrepo to intercept the
547 # manifest creation.
549 # manifest creation.
548 return manifest.manifestrevlog(self.svfs)
550 return manifest.manifestrevlog(self.svfs)
549
551
550 @storecache('00manifest.i')
552 @storecache('00manifest.i')
551 def manifestlog(self):
553 def manifestlog(self):
552 return manifest.manifestlog(self.svfs, self)
554 return manifest.manifestlog(self.svfs, self)
553
555
554 @repofilecache('dirstate')
556 @repofilecache('dirstate')
555 def dirstate(self):
557 def dirstate(self):
556 return dirstate.dirstate(self.vfs, self.ui, self.root,
558 return dirstate.dirstate(self.vfs, self.ui, self.root,
557 self._dirstatevalidate)
559 self._dirstatevalidate)
558
560
559 def _dirstatevalidate(self, node):
561 def _dirstatevalidate(self, node):
560 try:
562 try:
561 self.changelog.rev(node)
563 self.changelog.rev(node)
562 return node
564 return node
563 except error.LookupError:
565 except error.LookupError:
564 if not self._dirstatevalidatewarned:
566 if not self._dirstatevalidatewarned:
565 self._dirstatevalidatewarned = True
567 self._dirstatevalidatewarned = True
566 self.ui.warn(_("warning: ignoring unknown"
568 self.ui.warn(_("warning: ignoring unknown"
567 " working parent %s!\n") % short(node))
569 " working parent %s!\n") % short(node))
568 return nullid
570 return nullid
569
571
570 def __getitem__(self, changeid):
572 def __getitem__(self, changeid):
571 if changeid is None or changeid == wdirrev:
573 if changeid is None or changeid == wdirrev:
572 return context.workingctx(self)
574 return context.workingctx(self)
573 if isinstance(changeid, slice):
575 if isinstance(changeid, slice):
574 return [context.changectx(self, i)
576 return [context.changectx(self, i)
575 for i in xrange(*changeid.indices(len(self)))
577 for i in xrange(*changeid.indices(len(self)))
576 if i not in self.changelog.filteredrevs]
578 if i not in self.changelog.filteredrevs]
577 return context.changectx(self, changeid)
579 return context.changectx(self, changeid)
578
580
579 def __contains__(self, changeid):
581 def __contains__(self, changeid):
580 try:
582 try:
581 self[changeid]
583 self[changeid]
582 return True
584 return True
583 except error.RepoLookupError:
585 except error.RepoLookupError:
584 return False
586 return False
585
587
586 def __nonzero__(self):
588 def __nonzero__(self):
587 return True
589 return True
588
590
589 __bool__ = __nonzero__
591 __bool__ = __nonzero__
590
592
591 def __len__(self):
593 def __len__(self):
592 return len(self.changelog)
594 return len(self.changelog)
593
595
594 def __iter__(self):
596 def __iter__(self):
595 return iter(self.changelog)
597 return iter(self.changelog)
596
598
597 def revs(self, expr, *args):
599 def revs(self, expr, *args):
598 '''Find revisions matching a revset.
600 '''Find revisions matching a revset.
599
601
600 The revset is specified as a string ``expr`` that may contain
602 The revset is specified as a string ``expr`` that may contain
601 %-formatting to escape certain types. See ``revsetlang.formatspec``.
603 %-formatting to escape certain types. See ``revsetlang.formatspec``.
602
604
603 Revset aliases from the configuration are not expanded. To expand
605 Revset aliases from the configuration are not expanded. To expand
604 user aliases, consider calling ``scmutil.revrange()`` or
606 user aliases, consider calling ``scmutil.revrange()`` or
605 ``repo.anyrevs([expr], user=True)``.
607 ``repo.anyrevs([expr], user=True)``.
606
608
607 Returns a revset.abstractsmartset, which is a list-like interface
609 Returns a revset.abstractsmartset, which is a list-like interface
608 that contains integer revisions.
610 that contains integer revisions.
609 '''
611 '''
610 expr = revsetlang.formatspec(expr, *args)
612 expr = revsetlang.formatspec(expr, *args)
611 m = revset.match(None, expr)
613 m = revset.match(None, expr)
612 return m(self)
614 return m(self)
613
615
614 def set(self, expr, *args):
616 def set(self, expr, *args):
615 '''Find revisions matching a revset and emit changectx instances.
617 '''Find revisions matching a revset and emit changectx instances.
616
618
617 This is a convenience wrapper around ``revs()`` that iterates the
619 This is a convenience wrapper around ``revs()`` that iterates the
618 result and is a generator of changectx instances.
620 result and is a generator of changectx instances.
619
621
620 Revset aliases from the configuration are not expanded. To expand
622 Revset aliases from the configuration are not expanded. To expand
621 user aliases, consider calling ``scmutil.revrange()``.
623 user aliases, consider calling ``scmutil.revrange()``.
622 '''
624 '''
623 for r in self.revs(expr, *args):
625 for r in self.revs(expr, *args):
624 yield self[r]
626 yield self[r]
625
627
626 def anyrevs(self, specs, user=False):
628 def anyrevs(self, specs, user=False):
627 '''Find revisions matching one of the given revsets.
629 '''Find revisions matching one of the given revsets.
628
630
629 Revset aliases from the configuration are not expanded by default. To
631 Revset aliases from the configuration are not expanded by default. To
630 expand user aliases, specify ``user=True``.
632 expand user aliases, specify ``user=True``.
631 '''
633 '''
632 if user:
634 if user:
633 m = revset.matchany(self.ui, specs, repo=self)
635 m = revset.matchany(self.ui, specs, repo=self)
634 else:
636 else:
635 m = revset.matchany(None, specs)
637 m = revset.matchany(None, specs)
636 return m(self)
638 return m(self)
637
639
638 def url(self):
640 def url(self):
639 return 'file:' + self.root
641 return 'file:' + self.root
640
642
641 def hook(self, name, throw=False, **args):
643 def hook(self, name, throw=False, **args):
642 """Call a hook, passing this repo instance.
644 """Call a hook, passing this repo instance.
643
645
644 This is a convenience method to aid invoking hooks. Extensions likely
646 This is a convenience method to aid invoking hooks. Extensions likely
645 won't call this unless they have registered a custom hook or are
647 won't call this unless they have registered a custom hook or are
646 replacing code that is expected to call a hook.
648 replacing code that is expected to call a hook.
647 """
649 """
648 return hook.hook(self.ui, self, name, throw, **args)
650 return hook.hook(self.ui, self, name, throw, **args)
649
651
650 @unfilteredmethod
652 @unfilteredmethod
651 def _tag(self, names, node, message, local, user, date, extra=None,
653 def _tag(self, names, node, message, local, user, date, extra=None,
652 editor=False):
654 editor=False):
653 if isinstance(names, str):
655 if isinstance(names, str):
654 names = (names,)
656 names = (names,)
655
657
656 branches = self.branchmap()
658 branches = self.branchmap()
657 for name in names:
659 for name in names:
658 self.hook('pretag', throw=True, node=hex(node), tag=name,
660 self.hook('pretag', throw=True, node=hex(node), tag=name,
659 local=local)
661 local=local)
660 if name in branches:
662 if name in branches:
661 self.ui.warn(_("warning: tag %s conflicts with existing"
663 self.ui.warn(_("warning: tag %s conflicts with existing"
662 " branch name\n") % name)
664 " branch name\n") % name)
663
665
664 def writetags(fp, names, munge, prevtags):
666 def writetags(fp, names, munge, prevtags):
665 fp.seek(0, 2)
667 fp.seek(0, 2)
666 if prevtags and prevtags[-1] != '\n':
668 if prevtags and prevtags[-1] != '\n':
667 fp.write('\n')
669 fp.write('\n')
668 for name in names:
670 for name in names:
669 if munge:
671 if munge:
670 m = munge(name)
672 m = munge(name)
671 else:
673 else:
672 m = name
674 m = name
673
675
674 if (self._tagscache.tagtypes and
676 if (self._tagscache.tagtypes and
675 name in self._tagscache.tagtypes):
677 name in self._tagscache.tagtypes):
676 old = self.tags().get(name, nullid)
678 old = self.tags().get(name, nullid)
677 fp.write('%s %s\n' % (hex(old), m))
679 fp.write('%s %s\n' % (hex(old), m))
678 fp.write('%s %s\n' % (hex(node), m))
680 fp.write('%s %s\n' % (hex(node), m))
679 fp.close()
681 fp.close()
680
682
681 prevtags = ''
683 prevtags = ''
682 if local:
684 if local:
683 try:
685 try:
684 fp = self.vfs('localtags', 'r+')
686 fp = self.vfs('localtags', 'r+')
685 except IOError:
687 except IOError:
686 fp = self.vfs('localtags', 'a')
688 fp = self.vfs('localtags', 'a')
687 else:
689 else:
688 prevtags = fp.read()
690 prevtags = fp.read()
689
691
690 # local tags are stored in the current charset
692 # local tags are stored in the current charset
691 writetags(fp, names, None, prevtags)
693 writetags(fp, names, None, prevtags)
692 for name in names:
694 for name in names:
693 self.hook('tag', node=hex(node), tag=name, local=local)
695 self.hook('tag', node=hex(node), tag=name, local=local)
694 return
696 return
695
697
696 try:
698 try:
697 fp = self.wvfs('.hgtags', 'rb+')
699 fp = self.wvfs('.hgtags', 'rb+')
698 except IOError as e:
700 except IOError as e:
699 if e.errno != errno.ENOENT:
701 if e.errno != errno.ENOENT:
700 raise
702 raise
701 fp = self.wvfs('.hgtags', 'ab')
703 fp = self.wvfs('.hgtags', 'ab')
702 else:
704 else:
703 prevtags = fp.read()
705 prevtags = fp.read()
704
706
705 # committed tags are stored in UTF-8
707 # committed tags are stored in UTF-8
706 writetags(fp, names, encoding.fromlocal, prevtags)
708 writetags(fp, names, encoding.fromlocal, prevtags)
707
709
708 fp.close()
710 fp.close()
709
711
710 self.invalidatecaches()
712 self.invalidatecaches()
711
713
712 if '.hgtags' not in self.dirstate:
714 if '.hgtags' not in self.dirstate:
713 self[None].add(['.hgtags'])
715 self[None].add(['.hgtags'])
714
716
715 m = matchmod.exact(self.root, '', ['.hgtags'])
717 m = matchmod.exact(self.root, '', ['.hgtags'])
716 tagnode = self.commit(message, user, date, extra=extra, match=m,
718 tagnode = self.commit(message, user, date, extra=extra, match=m,
717 editor=editor)
719 editor=editor)
718
720
719 for name in names:
721 for name in names:
720 self.hook('tag', node=hex(node), tag=name, local=local)
722 self.hook('tag', node=hex(node), tag=name, local=local)
721
723
722 return tagnode
724 return tagnode
723
725
724 def tag(self, names, node, message, local, user, date, editor=False):
726 def tag(self, names, node, message, local, user, date, editor=False):
725 '''tag a revision with one or more symbolic names.
727 '''tag a revision with one or more symbolic names.
726
728
727 names is a list of strings or, when adding a single tag, names may be a
729 names is a list of strings or, when adding a single tag, names may be a
728 string.
730 string.
729
731
730 if local is True, the tags are stored in a per-repository file.
732 if local is True, the tags are stored in a per-repository file.
731 otherwise, they are stored in the .hgtags file, and a new
733 otherwise, they are stored in the .hgtags file, and a new
732 changeset is committed with the change.
734 changeset is committed with the change.
733
735
734 keyword arguments:
736 keyword arguments:
735
737
736 local: whether to store tags in non-version-controlled file
738 local: whether to store tags in non-version-controlled file
737 (default False)
739 (default False)
738
740
739 message: commit message to use if committing
741 message: commit message to use if committing
740
742
741 user: name of user to use if committing
743 user: name of user to use if committing
742
744
743 date: date tuple to use if committing'''
745 date: date tuple to use if committing'''
744
746
745 if not local:
747 if not local:
746 m = matchmod.exact(self.root, '', ['.hgtags'])
748 m = matchmod.exact(self.root, '', ['.hgtags'])
747 if any(self.status(match=m, unknown=True, ignored=True)):
749 if any(self.status(match=m, unknown=True, ignored=True)):
748 raise error.Abort(_('working copy of .hgtags is changed'),
750 raise error.Abort(_('working copy of .hgtags is changed'),
749 hint=_('please commit .hgtags manually'))
751 hint=_('please commit .hgtags manually'))
750
752
751 self.tags() # instantiate the cache
753 self.tags() # instantiate the cache
752 self._tag(names, node, message, local, user, date, editor=editor)
754 self._tag(names, node, message, local, user, date, editor=editor)
753
755
754 @filteredpropertycache
756 @filteredpropertycache
755 def _tagscache(self):
757 def _tagscache(self):
756 '''Returns a tagscache object that contains various tags related
758 '''Returns a tagscache object that contains various tags related
757 caches.'''
759 caches.'''
758
760
759 # This simplifies its cache management by having one decorated
761 # This simplifies its cache management by having one decorated
760 # function (this one) and the rest simply fetch things from it.
762 # function (this one) and the rest simply fetch things from it.
761 class tagscache(object):
763 class tagscache(object):
762 def __init__(self):
764 def __init__(self):
763 # These two define the set of tags for this repository. tags
765 # These two define the set of tags for this repository. tags
764 # maps tag name to node; tagtypes maps tag name to 'global' or
766 # maps tag name to node; tagtypes maps tag name to 'global' or
765 # 'local'. (Global tags are defined by .hgtags across all
767 # 'local'. (Global tags are defined by .hgtags across all
766 # heads, and local tags are defined in .hg/localtags.)
768 # heads, and local tags are defined in .hg/localtags.)
767 # They constitute the in-memory cache of tags.
769 # They constitute the in-memory cache of tags.
768 self.tags = self.tagtypes = None
770 self.tags = self.tagtypes = None
769
771
770 self.nodetagscache = self.tagslist = None
772 self.nodetagscache = self.tagslist = None
771
773
772 cache = tagscache()
774 cache = tagscache()
773 cache.tags, cache.tagtypes = self._findtags()
775 cache.tags, cache.tagtypes = self._findtags()
774
776
775 return cache
777 return cache
776
778
777 def tags(self):
779 def tags(self):
778 '''return a mapping of tag to node'''
780 '''return a mapping of tag to node'''
779 t = {}
781 t = {}
780 if self.changelog.filteredrevs:
782 if self.changelog.filteredrevs:
781 tags, tt = self._findtags()
783 tags, tt = self._findtags()
782 else:
784 else:
783 tags = self._tagscache.tags
785 tags = self._tagscache.tags
784 for k, v in tags.iteritems():
786 for k, v in tags.iteritems():
785 try:
787 try:
786 # ignore tags to unknown nodes
788 # ignore tags to unknown nodes
787 self.changelog.rev(v)
789 self.changelog.rev(v)
788 t[k] = v
790 t[k] = v
789 except (error.LookupError, ValueError):
791 except (error.LookupError, ValueError):
790 pass
792 pass
791 return t
793 return t
792
794
793 def _findtags(self):
795 def _findtags(self):
794 '''Do the hard work of finding tags. Return a pair of dicts
796 '''Do the hard work of finding tags. Return a pair of dicts
795 (tags, tagtypes) where tags maps tag name to node, and tagtypes
797 (tags, tagtypes) where tags maps tag name to node, and tagtypes
796 maps tag name to a string like \'global\' or \'local\'.
798 maps tag name to a string like \'global\' or \'local\'.
797 Subclasses or extensions are free to add their own tags, but
799 Subclasses or extensions are free to add their own tags, but
798 should be aware that the returned dicts will be retained for the
800 should be aware that the returned dicts will be retained for the
799 duration of the localrepo object.'''
801 duration of the localrepo object.'''
800
802
801 # XXX what tagtype should subclasses/extensions use? Currently
803 # XXX what tagtype should subclasses/extensions use? Currently
802 # mq and bookmarks add tags, but do not set the tagtype at all.
804 # mq and bookmarks add tags, but do not set the tagtype at all.
803 # Should each extension invent its own tag type? Should there
805 # Should each extension invent its own tag type? Should there
804 # be one tagtype for all such "virtual" tags? Or is the status
806 # be one tagtype for all such "virtual" tags? Or is the status
805 # quo fine?
807 # quo fine?
806
808
807 alltags = {} # map tag name to (node, hist)
809 alltags = {} # map tag name to (node, hist)
808 tagtypes = {}
810 tagtypes = {}
809
811
810 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
812 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
811 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
813 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
812
814
813 # Build the return dicts. Have to re-encode tag names because
815 # Build the return dicts. Have to re-encode tag names because
814 # the tags module always uses UTF-8 (in order not to lose info
816 # the tags module always uses UTF-8 (in order not to lose info
815 # writing to the cache), but the rest of Mercurial wants them in
817 # writing to the cache), but the rest of Mercurial wants them in
816 # local encoding.
818 # local encoding.
817 tags = {}
819 tags = {}
818 for (name, (node, hist)) in alltags.iteritems():
820 for (name, (node, hist)) in alltags.iteritems():
819 if node != nullid:
821 if node != nullid:
820 tags[encoding.tolocal(name)] = node
822 tags[encoding.tolocal(name)] = node
821 tags['tip'] = self.changelog.tip()
823 tags['tip'] = self.changelog.tip()
822 tagtypes = dict([(encoding.tolocal(name), value)
824 tagtypes = dict([(encoding.tolocal(name), value)
823 for (name, value) in tagtypes.iteritems()])
825 for (name, value) in tagtypes.iteritems()])
824 return (tags, tagtypes)
826 return (tags, tagtypes)
825
827
826 def tagtype(self, tagname):
828 def tagtype(self, tagname):
827 '''
829 '''
828 return the type of the given tag. result can be:
830 return the type of the given tag. result can be:
829
831
830 'local' : a local tag
832 'local' : a local tag
831 'global' : a global tag
833 'global' : a global tag
832 None : tag does not exist
834 None : tag does not exist
833 '''
835 '''
834
836
835 return self._tagscache.tagtypes.get(tagname)
837 return self._tagscache.tagtypes.get(tagname)
836
838
837 def tagslist(self):
839 def tagslist(self):
838 '''return a list of tags ordered by revision'''
840 '''return a list of tags ordered by revision'''
839 if not self._tagscache.tagslist:
841 if not self._tagscache.tagslist:
840 l = []
842 l = []
841 for t, n in self.tags().iteritems():
843 for t, n in self.tags().iteritems():
842 l.append((self.changelog.rev(n), t, n))
844 l.append((self.changelog.rev(n), t, n))
843 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
845 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
844
846
845 return self._tagscache.tagslist
847 return self._tagscache.tagslist
846
848
847 def nodetags(self, node):
849 def nodetags(self, node):
848 '''return the tags associated with a node'''
850 '''return the tags associated with a node'''
849 if not self._tagscache.nodetagscache:
851 if not self._tagscache.nodetagscache:
850 nodetagscache = {}
852 nodetagscache = {}
851 for t, n in self._tagscache.tags.iteritems():
853 for t, n in self._tagscache.tags.iteritems():
852 nodetagscache.setdefault(n, []).append(t)
854 nodetagscache.setdefault(n, []).append(t)
853 for tags in nodetagscache.itervalues():
855 for tags in nodetagscache.itervalues():
854 tags.sort()
856 tags.sort()
855 self._tagscache.nodetagscache = nodetagscache
857 self._tagscache.nodetagscache = nodetagscache
856 return self._tagscache.nodetagscache.get(node, [])
858 return self._tagscache.nodetagscache.get(node, [])
857
859
858 def nodebookmarks(self, node):
860 def nodebookmarks(self, node):
859 """return the list of bookmarks pointing to the specified node"""
861 """return the list of bookmarks pointing to the specified node"""
860 marks = []
862 marks = []
861 for bookmark, n in self._bookmarks.iteritems():
863 for bookmark, n in self._bookmarks.iteritems():
862 if n == node:
864 if n == node:
863 marks.append(bookmark)
865 marks.append(bookmark)
864 return sorted(marks)
866 return sorted(marks)
865
867
866 def branchmap(self):
868 def branchmap(self):
867 '''returns a dictionary {branch: [branchheads]} with branchheads
869 '''returns a dictionary {branch: [branchheads]} with branchheads
868 ordered by increasing revision number'''
870 ordered by increasing revision number'''
869 branchmap.updatecache(self)
871 branchmap.updatecache(self)
870 return self._branchcaches[self.filtername]
872 return self._branchcaches[self.filtername]
871
873
872 @unfilteredmethod
874 @unfilteredmethod
873 def revbranchcache(self):
875 def revbranchcache(self):
874 if not self._revbranchcache:
876 if not self._revbranchcache:
875 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
877 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
876 return self._revbranchcache
878 return self._revbranchcache
877
879
878 def branchtip(self, branch, ignoremissing=False):
880 def branchtip(self, branch, ignoremissing=False):
879 '''return the tip node for a given branch
881 '''return the tip node for a given branch
880
882
881 If ignoremissing is True, then this method will not raise an error.
883 If ignoremissing is True, then this method will not raise an error.
882 This is helpful for callers that only expect None for a missing branch
884 This is helpful for callers that only expect None for a missing branch
883 (e.g. namespace).
885 (e.g. namespace).
884
886
885 '''
887 '''
886 try:
888 try:
887 return self.branchmap().branchtip(branch)
889 return self.branchmap().branchtip(branch)
888 except KeyError:
890 except KeyError:
889 if not ignoremissing:
891 if not ignoremissing:
890 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
892 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
891 else:
893 else:
892 pass
894 pass
893
895
894 def lookup(self, key):
896 def lookup(self, key):
895 return self[key].node()
897 return self[key].node()
896
898
897 def lookupbranch(self, key, remote=None):
899 def lookupbranch(self, key, remote=None):
898 repo = remote or self
900 repo = remote or self
899 if key in repo.branchmap():
901 if key in repo.branchmap():
900 return key
902 return key
901
903
902 repo = (remote and remote.local()) and remote or self
904 repo = (remote and remote.local()) and remote or self
903 return repo[key].branch()
905 return repo[key].branch()
904
906
905 def known(self, nodes):
907 def known(self, nodes):
906 cl = self.changelog
908 cl = self.changelog
907 nm = cl.nodemap
909 nm = cl.nodemap
908 filtered = cl.filteredrevs
910 filtered = cl.filteredrevs
909 result = []
911 result = []
910 for n in nodes:
912 for n in nodes:
911 r = nm.get(n)
913 r = nm.get(n)
912 resp = not (r is None or r in filtered)
914 resp = not (r is None or r in filtered)
913 result.append(resp)
915 result.append(resp)
914 return result
916 return result
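# Usage sketch (illustrative only): known() answers membership for a batch of
# nodes, treating filtered (hidden) revisions as unknown:
#
#   unknown = '\x11' * 20                        # a hash not present in the repo
#   repo.known([repo['tip'].node(), unknown])    # -> [True, False]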
915
917
916 def local(self):
918 def local(self):
917 return self
919 return self
918
920
919 def publishing(self):
921 def publishing(self):
920 # it's safe (and desirable) to trust the publish flag unconditionally
922 # it's safe (and desirable) to trust the publish flag unconditionally
921 # so that we don't finalize changes shared between users via ssh or nfs
923 # so that we don't finalize changes shared between users via ssh or nfs
922 return self.ui.configbool('phases', 'publish', True, untrusted=True)
924 return self.ui.configbool('phases', 'publish', True, untrusted=True)
923
925
924 def cancopy(self):
926 def cancopy(self):
925 # so statichttprepo's override of local() works
927 # so statichttprepo's override of local() works
926 if not self.local():
928 if not self.local():
927 return False
929 return False
928 if not self.publishing():
930 if not self.publishing():
929 return True
931 return True
930 # if publishing we can't copy if there is filtered content
932 # if publishing we can't copy if there is filtered content
931 return not self.filtered('visible').changelog.filteredrevs
933 return not self.filtered('visible').changelog.filteredrevs
932
934
933 def shared(self):
935 def shared(self):
934 '''the type of shared repository (None if not shared)'''
936 '''the type of shared repository (None if not shared)'''
935 if self.sharedpath != self.path:
937 if self.sharedpath != self.path:
936 return 'store'
938 return 'store'
937 return None
939 return None
938
940
939 def join(self, f, *insidef):
941 def join(self, f, *insidef):
940 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
942 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
941 return self.vfs.join(os.path.join(f, *insidef))
943 return self.vfs.join(os.path.join(f, *insidef))
942
944
943 def wjoin(self, f, *insidef):
945 def wjoin(self, f, *insidef):
944 return self.vfs.reljoin(self.root, f, *insidef)
946 return self.vfs.reljoin(self.root, f, *insidef)
945
947
946 def file(self, f):
948 def file(self, f):
947 if f[0] == '/':
949 if f[0] == '/':
948 f = f[1:]
950 f = f[1:]
949 return filelog.filelog(self.svfs, f)
951 return filelog.filelog(self.svfs, f)
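# Usage sketch (illustrative only): file() opens the filelog (per-file revision
# storage); a leading '/' on the path is tolerated and stripped:
#
#   flog = repo.file('/README')     # same as repo.file('README')
#   len(flog)                       # number of stored revisions of that file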
950
952
951 def changectx(self, changeid):
953 def changectx(self, changeid):
952 return self[changeid]
954 return self[changeid]
953
955
954 def setparents(self, p1, p2=nullid):
956 def setparents(self, p1, p2=nullid):
955 self.dirstate.beginparentchange()
957 self.dirstate.beginparentchange()
956 copies = self.dirstate.setparents(p1, p2)
958 copies = self.dirstate.setparents(p1, p2)
957 pctx = self[p1]
959 pctx = self[p1]
958 if copies:
960 if copies:
959 # Adjust copy records; the dirstate cannot do it because that
961 # Adjust copy records; the dirstate cannot do it because that
960 # requires access to the parents' manifests. Preserve them
962 # requires access to the parents' manifests. Preserve them
961 # only for entries added to the first parent.
963 # only for entries added to the first parent.
962 for f in copies:
964 for f in copies:
963 if f not in pctx and copies[f] in pctx:
965 if f not in pctx and copies[f] in pctx:
964 self.dirstate.copy(copies[f], f)
966 self.dirstate.copy(copies[f], f)
965 if p2 == nullid:
967 if p2 == nullid:
966 for f, s in sorted(self.dirstate.copies().items()):
968 for f, s in sorted(self.dirstate.copies().items()):
967 if f not in pctx and s not in pctx:
969 if f not in pctx and s not in pctx:
968 self.dirstate.copy(None, f)
970 self.dirstate.copy(None, f)
969 self.dirstate.endparentchange()
971 self.dirstate.endparentchange()
970
972
971 def filectx(self, path, changeid=None, fileid=None):
973 def filectx(self, path, changeid=None, fileid=None):
972 """changeid can be a changeset revision, node, or tag.
974 """changeid can be a changeset revision, node, or tag.
973 fileid can be a file revision or node."""
975 fileid can be a file revision or node."""
974 return context.filectx(self, path, changeid, fileid)
976 return context.filectx(self, path, changeid, fileid)
975
977
976 def getcwd(self):
978 def getcwd(self):
977 return self.dirstate.getcwd()
979 return self.dirstate.getcwd()
978
980
979 def pathto(self, f, cwd=None):
981 def pathto(self, f, cwd=None):
980 return self.dirstate.pathto(f, cwd)
982 return self.dirstate.pathto(f, cwd)
981
983
982 def wfile(self, f, mode='r'):
984 def wfile(self, f, mode='r'):
983 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
985 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
984 return self.wvfs(f, mode)
986 return self.wvfs(f, mode)
985
987
986 def _link(self, f):
988 def _link(self, f):
987 self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
989 self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
988 '4.0')
990 '4.0')
989 return self.wvfs.islink(f)
991 return self.wvfs.islink(f)
990
992
991 def _loadfilter(self, filter):
993 def _loadfilter(self, filter):
992 if filter not in self.filterpats:
994 if filter not in self.filterpats:
993 l = []
995 l = []
994 for pat, cmd in self.ui.configitems(filter):
996 for pat, cmd in self.ui.configitems(filter):
995 if cmd == '!':
997 if cmd == '!':
996 continue
998 continue
997 mf = matchmod.match(self.root, '', [pat])
999 mf = matchmod.match(self.root, '', [pat])
998 fn = None
1000 fn = None
999 params = cmd
1001 params = cmd
1000 for name, filterfn in self._datafilters.iteritems():
1002 for name, filterfn in self._datafilters.iteritems():
1001 if cmd.startswith(name):
1003 if cmd.startswith(name):
1002 fn = filterfn
1004 fn = filterfn
1003 params = cmd[len(name):].lstrip()
1005 params = cmd[len(name):].lstrip()
1004 break
1006 break
1005 if not fn:
1007 if not fn:
1006 fn = lambda s, c, **kwargs: util.filter(s, c)
1008 fn = lambda s, c, **kwargs: util.filter(s, c)
1007 # Wrap old filters not supporting keyword arguments
1009 # Wrap old filters not supporting keyword arguments
1008 if not inspect.getargspec(fn)[2]:
1010 if not inspect.getargspec(fn)[2]:
1009 oldfn = fn
1011 oldfn = fn
1010 fn = lambda s, c, **kwargs: oldfn(s, c)
1012 fn = lambda s, c, **kwargs: oldfn(s, c)
1011 l.append((mf, fn, params))
1013 l.append((mf, fn, params))
1012 self.filterpats[filter] = l
1014 self.filterpats[filter] = l
1013 return self.filterpats[filter]
1015 return self.filterpats[filter]
1014
1016
1015 def _filter(self, filterpats, filename, data):
1017 def _filter(self, filterpats, filename, data):
1016 for mf, fn, cmd in filterpats:
1018 for mf, fn, cmd in filterpats:
1017 if mf(filename):
1019 if mf(filename):
1018 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1020 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1019 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1021 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1020 break
1022 break
1021
1023
1022 return data
1024 return data
1023
1025
1024 @unfilteredpropertycache
1026 @unfilteredpropertycache
1025 def _encodefilterpats(self):
1027 def _encodefilterpats(self):
1026 return self._loadfilter('encode')
1028 return self._loadfilter('encode')
1027
1029
1028 @unfilteredpropertycache
1030 @unfilteredpropertycache
1029 def _decodefilterpats(self):
1031 def _decodefilterpats(self):
1030 return self._loadfilter('decode')
1032 return self._loadfilter('decode')
1031
1033
1032 def adddatafilter(self, name, filter):
1034 def adddatafilter(self, name, filter):
1033 self._datafilters[name] = filter
1035 self._datafilters[name] = filter
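# Usage sketch (illustrative only): a registered data filter is picked up by
# _loadfilter() when an [encode]/[decode] command starts with its name; the
# rest of the command is handed back as the params argument:
#
#   def upper(s, params, **kwargs):
#       return s.upper()
#   repo.adddatafilter('upper:', upper)
#   # with config  [encode]  **.txt = upper:  the filter runs during wread()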
1034
1036
1035 def wread(self, filename):
1037 def wread(self, filename):
1036 if self.wvfs.islink(filename):
1038 if self.wvfs.islink(filename):
1037 data = self.wvfs.readlink(filename)
1039 data = self.wvfs.readlink(filename)
1038 else:
1040 else:
1039 data = self.wvfs.read(filename)
1041 data = self.wvfs.read(filename)
1040 return self._filter(self._encodefilterpats, filename, data)
1042 return self._filter(self._encodefilterpats, filename, data)
1041
1043
1042 def wwrite(self, filename, data, flags, backgroundclose=False):
1044 def wwrite(self, filename, data, flags, backgroundclose=False):
1043 """write ``data`` into ``filename`` in the working directory
1045 """write ``data`` into ``filename`` in the working directory
1044
1046
1045 This returns the length of the written (maybe decoded) data.
1047 This returns the length of the written (maybe decoded) data.
1046 """
1048 """
1047 data = self._filter(self._decodefilterpats, filename, data)
1049 data = self._filter(self._decodefilterpats, filename, data)
1048 if 'l' in flags:
1050 if 'l' in flags:
1049 self.wvfs.symlink(data, filename)
1051 self.wvfs.symlink(data, filename)
1050 else:
1052 else:
1051 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1053 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1052 if 'x' in flags:
1054 if 'x' in flags:
1053 self.wvfs.setflags(filename, False, True)
1055 self.wvfs.setflags(filename, False, True)
1054 return len(data)
1056 return len(data)
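# Usage sketch (illustrative only): the flags argument selects how the data is
# materialized -- '' writes a plain file, 'x' also sets the executable bit, and
# 'l' creates a symlink whose target is the data:
#
#   repo.wwrite('hello.txt', 'hi\n', '')
#   repo.wwrite('run.sh', '#!/bin/sh\n', 'x')
#   repo.wwrite('hello.lnk', 'hello.txt', 'l')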
1055
1057
1056 def wwritedata(self, filename, data):
1058 def wwritedata(self, filename, data):
1057 return self._filter(self._decodefilterpats, filename, data)
1059 return self._filter(self._decodefilterpats, filename, data)
1058
1060
1059 def currenttransaction(self):
1061 def currenttransaction(self):
1060 """return the current transaction or None if non exists"""
1062 """return the current transaction or None if non exists"""
1061 if self._transref:
1063 if self._transref:
1062 tr = self._transref()
1064 tr = self._transref()
1063 else:
1065 else:
1064 tr = None
1066 tr = None
1065
1067
1066 if tr and tr.running():
1068 if tr and tr.running():
1067 return tr
1069 return tr
1068 return None
1070 return None
1069
1071
1070 def transaction(self, desc, report=None):
1072 def transaction(self, desc, report=None):
1071 if (self.ui.configbool('devel', 'all-warnings')
1073 if (self.ui.configbool('devel', 'all-warnings')
1072 or self.ui.configbool('devel', 'check-locks')):
1074 or self.ui.configbool('devel', 'check-locks')):
1073 if self._currentlock(self._lockref) is None:
1075 if self._currentlock(self._lockref) is None:
1074 raise error.ProgrammingError('transaction requires locking')
1076 raise error.ProgrammingError('transaction requires locking')
1075 tr = self.currenttransaction()
1077 tr = self.currenttransaction()
1076 if tr is not None:
1078 if tr is not None:
1077 return tr.nest()
1079 return tr.nest()
1078
1080
1079 # abort here if the journal already exists
1081 # abort here if the journal already exists
1080 if self.svfs.exists("journal"):
1082 if self.svfs.exists("journal"):
1081 raise error.RepoError(
1083 raise error.RepoError(
1082 _("abandoned transaction found"),
1084 _("abandoned transaction found"),
1083 hint=_("run 'hg recover' to clean up transaction"))
1085 hint=_("run 'hg recover' to clean up transaction"))
1084
1086
1085 idbase = "%.40f#%f" % (random.random(), time.time())
1087 idbase = "%.40f#%f" % (random.random(), time.time())
1086 ha = hex(hashlib.sha1(idbase).digest())
1088 ha = hex(hashlib.sha1(idbase).digest())
1087 txnid = 'TXN:' + ha
1089 txnid = 'TXN:' + ha
1088 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1090 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1089
1091
1090 self._writejournal(desc)
1092 self._writejournal(desc)
1091 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1093 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1092 if report:
1094 if report:
1093 rp = report
1095 rp = report
1094 else:
1096 else:
1095 rp = self.ui.warn
1097 rp = self.ui.warn
1096 vfsmap = {'plain': self.vfs} # root of .hg/
1098 vfsmap = {'plain': self.vfs} # root of .hg/
1097 # we must avoid cyclic reference between repo and transaction.
1099 # we must avoid cyclic reference between repo and transaction.
1098 reporef = weakref.ref(self)
1100 reporef = weakref.ref(self)
1099 def validate(tr):
1101 def validate(tr):
1100 """will run pre-closing hooks"""
1102 """will run pre-closing hooks"""
1101 reporef().hook('pretxnclose', throw=True,
1103 reporef().hook('pretxnclose', throw=True,
1102 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1104 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1103 def releasefn(tr, success):
1105 def releasefn(tr, success):
1104 repo = reporef()
1106 repo = reporef()
1105 if success:
1107 if success:
1106 # this should be explicitly invoked here, because
1108 # this should be explicitly invoked here, because
1107 # in-memory changes aren't written out when the
1109 # in-memory changes aren't written out when the
1108 # transaction closes if tr.addfilegenerator (via
1110 # transaction closes if tr.addfilegenerator (via
1109 # dirstate.write or so) wasn't invoked while the
1111 # dirstate.write or so) wasn't invoked while the
1110 # transaction was running
1112 # transaction was running
1111 repo.dirstate.write(None)
1113 repo.dirstate.write(None)
1112 else:
1114 else:
1113 # discard all changes (including ones already written
1115 # discard all changes (including ones already written
1114 # out) in this transaction
1116 # out) in this transaction
1115 repo.dirstate.restorebackup(None, prefix='journal.')
1117 repo.dirstate.restorebackup(None, prefix='journal.')
1116
1118
1117 repo.invalidate(clearfilecache=True)
1119 repo.invalidate(clearfilecache=True)
1118
1120
1119 tr = transaction.transaction(rp, self.svfs, vfsmap,
1121 tr = transaction.transaction(rp, self.svfs, vfsmap,
1120 "journal",
1122 "journal",
1121 "undo",
1123 "undo",
1122 aftertrans(renames),
1124 aftertrans(renames),
1123 self.store.createmode,
1125 self.store.createmode,
1124 validator=validate,
1126 validator=validate,
1125 releasefn=releasefn)
1127 releasefn=releasefn)
1126
1128
1127 tr.hookargs['txnid'] = txnid
1129 tr.hookargs['txnid'] = txnid
1128 # note: writing the fncache only during finalize means that the file is
1130 # note: writing the fncache only during finalize means that the file is
1129 # outdated when running hooks. As fncache is used for streaming clone,
1131 # outdated when running hooks. As fncache is used for streaming clone,
1130 # this is not expected to break anything that happens during the hooks.
1132 # this is not expected to break anything that happens during the hooks.
1131 tr.addfinalize('flush-fncache', self.store.write)
1133 tr.addfinalize('flush-fncache', self.store.write)
1132 def txnclosehook(tr2):
1134 def txnclosehook(tr2):
1133 """To be run if transaction is successful, will schedule a hook run
1135 """To be run if transaction is successful, will schedule a hook run
1134 """
1136 """
1135 # Don't reference tr2 in hook() so we don't hold a reference.
1137 # Don't reference tr2 in hook() so we don't hold a reference.
1136 # This reduces memory consumption when there are multiple
1138 # This reduces memory consumption when there are multiple
1137 # transactions per lock. This can likely go away if issue5045
1139 # transactions per lock. This can likely go away if issue5045
1138 # fixes the function accumulation.
1140 # fixes the function accumulation.
1139 hookargs = tr2.hookargs
1141 hookargs = tr2.hookargs
1140
1142
1141 def hook():
1143 def hook():
1142 reporef().hook('txnclose', throw=False, txnname=desc,
1144 reporef().hook('txnclose', throw=False, txnname=desc,
1143 **pycompat.strkwargs(hookargs))
1145 **pycompat.strkwargs(hookargs))
1144 reporef()._afterlock(hook)
1146 reporef()._afterlock(hook)
1145 tr.addfinalize('txnclose-hook', txnclosehook)
1147 tr.addfinalize('txnclose-hook', txnclosehook)
1146 def txnaborthook(tr2):
1148 def txnaborthook(tr2):
1147 """To be run if transaction is aborted
1149 """To be run if transaction is aborted
1148 """
1150 """
1149 reporef().hook('txnabort', throw=False, txnname=desc,
1151 reporef().hook('txnabort', throw=False, txnname=desc,
1150 **tr2.hookargs)
1152 **tr2.hookargs)
1151 tr.addabort('txnabort-hook', txnaborthook)
1153 tr.addabort('txnabort-hook', txnaborthook)
1152 # avoid eager cache invalidation. in-memory data should be identical
1154 # avoid eager cache invalidation. in-memory data should be identical
1153 # to stored data if transaction has no error.
1155 # to stored data if transaction has no error.
1154 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1156 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1155 self._transref = weakref.ref(tr)
1157 self._transref = weakref.ref(tr)
1156 return tr
1158 return tr
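# Usage sketch (illustrative only): a store lock must already be held (the
# devel check above enforces this); the caller closes the transaction on
# success and releases everything in a finally block, as commit() does below:
#
#   lock = tr = None
#   try:
#       lock = repo.lock()
#       tr = repo.transaction('my-operation')
#       # ... write to the store through tr ...
#       tr.close()
#   finally:
#       lockmod.release(tr, lock)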
1157
1159
1158 def _journalfiles(self):
1160 def _journalfiles(self):
1159 return ((self.svfs, 'journal'),
1161 return ((self.svfs, 'journal'),
1160 (self.vfs, 'journal.dirstate'),
1162 (self.vfs, 'journal.dirstate'),
1161 (self.vfs, 'journal.branch'),
1163 (self.vfs, 'journal.branch'),
1162 (self.vfs, 'journal.desc'),
1164 (self.vfs, 'journal.desc'),
1163 (self.vfs, 'journal.bookmarks'),
1165 (self.vfs, 'journal.bookmarks'),
1164 (self.svfs, 'journal.phaseroots'))
1166 (self.svfs, 'journal.phaseroots'))
1165
1167
1166 def undofiles(self):
1168 def undofiles(self):
1167 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1169 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1168
1170
1169 def _writejournal(self, desc):
1171 def _writejournal(self, desc):
1170 self.dirstate.savebackup(None, prefix='journal.')
1172 self.dirstate.savebackup(None, prefix='journal.')
1171 self.vfs.write("journal.branch",
1173 self.vfs.write("journal.branch",
1172 encoding.fromlocal(self.dirstate.branch()))
1174 encoding.fromlocal(self.dirstate.branch()))
1173 self.vfs.write("journal.desc",
1175 self.vfs.write("journal.desc",
1174 "%d\n%s\n" % (len(self), desc))
1176 "%d\n%s\n" % (len(self), desc))
1175 self.vfs.write("journal.bookmarks",
1177 self.vfs.write("journal.bookmarks",
1176 self.vfs.tryread("bookmarks"))
1178 self.vfs.tryread("bookmarks"))
1177 self.svfs.write("journal.phaseroots",
1179 self.svfs.write("journal.phaseroots",
1178 self.svfs.tryread("phaseroots"))
1180 self.svfs.tryread("phaseroots"))
1179
1181
1180 def recover(self):
1182 def recover(self):
1181 with self.lock():
1183 with self.lock():
1182 if self.svfs.exists("journal"):
1184 if self.svfs.exists("journal"):
1183 self.ui.status(_("rolling back interrupted transaction\n"))
1185 self.ui.status(_("rolling back interrupted transaction\n"))
1184 vfsmap = {'': self.svfs,
1186 vfsmap = {'': self.svfs,
1185 'plain': self.vfs,}
1187 'plain': self.vfs,}
1186 transaction.rollback(self.svfs, vfsmap, "journal",
1188 transaction.rollback(self.svfs, vfsmap, "journal",
1187 self.ui.warn)
1189 self.ui.warn)
1188 self.invalidate()
1190 self.invalidate()
1189 return True
1191 return True
1190 else:
1192 else:
1191 self.ui.warn(_("no interrupted transaction available\n"))
1193 self.ui.warn(_("no interrupted transaction available\n"))
1192 return False
1194 return False
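# Usage sketch (illustrative only): this is what 'hg recover' runs; it returns
# True when an interrupted transaction was rolled back, False otherwise:
#
#   if not repo.recover():
#       repo.ui.status('nothing to recover\n')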
1193
1195
1194 def rollback(self, dryrun=False, force=False):
1196 def rollback(self, dryrun=False, force=False):
1195 wlock = lock = dsguard = None
1197 wlock = lock = dsguard = None
1196 try:
1198 try:
1197 wlock = self.wlock()
1199 wlock = self.wlock()
1198 lock = self.lock()
1200 lock = self.lock()
1199 if self.svfs.exists("undo"):
1201 if self.svfs.exists("undo"):
1200 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1202 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1201
1203
1202 return self._rollback(dryrun, force, dsguard)
1204 return self._rollback(dryrun, force, dsguard)
1203 else:
1205 else:
1204 self.ui.warn(_("no rollback information available\n"))
1206 self.ui.warn(_("no rollback information available\n"))
1205 return 1
1207 return 1
1206 finally:
1208 finally:
1207 release(dsguard, lock, wlock)
1209 release(dsguard, lock, wlock)
1208
1210
1209 @unfilteredmethod # Until we get smarter cache management
1211 @unfilteredmethod # Until we get smarter cache management
1210 def _rollback(self, dryrun, force, dsguard):
1212 def _rollback(self, dryrun, force, dsguard):
1211 ui = self.ui
1213 ui = self.ui
1212 try:
1214 try:
1213 args = self.vfs.read('undo.desc').splitlines()
1215 args = self.vfs.read('undo.desc').splitlines()
1214 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1216 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1215 if len(args) >= 3:
1217 if len(args) >= 3:
1216 detail = args[2]
1218 detail = args[2]
1217 oldtip = oldlen - 1
1219 oldtip = oldlen - 1
1218
1220
1219 if detail and ui.verbose:
1221 if detail and ui.verbose:
1220 msg = (_('repository tip rolled back to revision %s'
1222 msg = (_('repository tip rolled back to revision %s'
1221 ' (undo %s: %s)\n')
1223 ' (undo %s: %s)\n')
1222 % (oldtip, desc, detail))
1224 % (oldtip, desc, detail))
1223 else:
1225 else:
1224 msg = (_('repository tip rolled back to revision %s'
1226 msg = (_('repository tip rolled back to revision %s'
1225 ' (undo %s)\n')
1227 ' (undo %s)\n')
1226 % (oldtip, desc))
1228 % (oldtip, desc))
1227 except IOError:
1229 except IOError:
1228 msg = _('rolling back unknown transaction\n')
1230 msg = _('rolling back unknown transaction\n')
1229 desc = None
1231 desc = None
1230
1232
1231 if not force and self['.'] != self['tip'] and desc == 'commit':
1233 if not force and self['.'] != self['tip'] and desc == 'commit':
1232 raise error.Abort(
1234 raise error.Abort(
1233 _('rollback of last commit while not checked out '
1235 _('rollback of last commit while not checked out '
1234 'may lose data'), hint=_('use -f to force'))
1236 'may lose data'), hint=_('use -f to force'))
1235
1237
1236 ui.status(msg)
1238 ui.status(msg)
1237 if dryrun:
1239 if dryrun:
1238 return 0
1240 return 0
1239
1241
1240 parents = self.dirstate.parents()
1242 parents = self.dirstate.parents()
1241 self.destroying()
1243 self.destroying()
1242 vfsmap = {'plain': self.vfs, '': self.svfs}
1244 vfsmap = {'plain': self.vfs, '': self.svfs}
1243 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1245 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1244 if self.vfs.exists('undo.bookmarks'):
1246 if self.vfs.exists('undo.bookmarks'):
1245 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1247 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1246 if self.svfs.exists('undo.phaseroots'):
1248 if self.svfs.exists('undo.phaseroots'):
1247 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1249 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1248 self.invalidate()
1250 self.invalidate()
1249
1251
1250 parentgone = (parents[0] not in self.changelog.nodemap or
1252 parentgone = (parents[0] not in self.changelog.nodemap or
1251 parents[1] not in self.changelog.nodemap)
1253 parents[1] not in self.changelog.nodemap)
1252 if parentgone:
1254 if parentgone:
1253 # prevent dirstateguard from overwriting already restored one
1255 # prevent dirstateguard from overwriting already restored one
1254 dsguard.close()
1256 dsguard.close()
1255
1257
1256 self.dirstate.restorebackup(None, prefix='undo.')
1258 self.dirstate.restorebackup(None, prefix='undo.')
1257 try:
1259 try:
1258 branch = self.vfs.read('undo.branch')
1260 branch = self.vfs.read('undo.branch')
1259 self.dirstate.setbranch(encoding.tolocal(branch))
1261 self.dirstate.setbranch(encoding.tolocal(branch))
1260 except IOError:
1262 except IOError:
1261 ui.warn(_('named branch could not be reset: '
1263 ui.warn(_('named branch could not be reset: '
1262 'current branch is still \'%s\'\n')
1264 'current branch is still \'%s\'\n')
1263 % self.dirstate.branch())
1265 % self.dirstate.branch())
1264
1266
1265 parents = tuple([p.rev() for p in self[None].parents()])
1267 parents = tuple([p.rev() for p in self[None].parents()])
1266 if len(parents) > 1:
1268 if len(parents) > 1:
1267 ui.status(_('working directory now based on '
1269 ui.status(_('working directory now based on '
1268 'revisions %d and %d\n') % parents)
1270 'revisions %d and %d\n') % parents)
1269 else:
1271 else:
1270 ui.status(_('working directory now based on '
1272 ui.status(_('working directory now based on '
1271 'revision %d\n') % parents)
1273 'revision %d\n') % parents)
1272 mergemod.mergestate.clean(self, self['.'].node())
1274 mergemod.mergestate.clean(self, self['.'].node())
1273
1275
1274 # TODO: if we know which new heads may result from this rollback, pass
1276 # TODO: if we know which new heads may result from this rollback, pass
1275 # them to destroy(), which will prevent the branchhead cache from being
1277 # them to destroy(), which will prevent the branchhead cache from being
1276 # invalidated.
1278 # invalidated.
1277 self.destroyed()
1279 self.destroyed()
1278 return 0
1280 return 0
1279
1281
1280 def invalidatecaches(self):
1282 def invalidatecaches(self):
1281
1283
1282 if '_tagscache' in vars(self):
1284 if '_tagscache' in vars(self):
1283 # can't use delattr on proxy
1285 # can't use delattr on proxy
1284 del self.__dict__['_tagscache']
1286 del self.__dict__['_tagscache']
1285
1287
1286 self.unfiltered()._branchcaches.clear()
1288 self.unfiltered()._branchcaches.clear()
1287 self.invalidatevolatilesets()
1289 self.invalidatevolatilesets()
1288
1290
1289 def invalidatevolatilesets(self):
1291 def invalidatevolatilesets(self):
1290 self.filteredrevcache.clear()
1292 self.filteredrevcache.clear()
1291 obsolete.clearobscaches(self)
1293 obsolete.clearobscaches(self)
1292
1294
1293 def invalidatedirstate(self):
1295 def invalidatedirstate(self):
1294 '''Invalidates the dirstate, causing the next call to dirstate
1296 '''Invalidates the dirstate, causing the next call to dirstate
1295 to check if it was modified since the last time it was read,
1297 to check if it was modified since the last time it was read,
1296 rereading it if it has.
1298 rereading it if it has.
1297
1299
1298 This is different from dirstate.invalidate() in that it doesn't always
1300 This is different from dirstate.invalidate() in that it doesn't always
1299 reread the dirstate. Use dirstate.invalidate() if you want to
1301 reread the dirstate. Use dirstate.invalidate() if you want to
1300 explicitly read the dirstate again (i.e. restoring it to a previous
1302 explicitly read the dirstate again (i.e. restoring it to a previous
1301 known good state).'''
1303 known good state).'''
1302 if hasunfilteredcache(self, 'dirstate'):
1304 if hasunfilteredcache(self, 'dirstate'):
1303 for k in self.dirstate._filecache:
1305 for k in self.dirstate._filecache:
1304 try:
1306 try:
1305 delattr(self.dirstate, k)
1307 delattr(self.dirstate, k)
1306 except AttributeError:
1308 except AttributeError:
1307 pass
1309 pass
1308 delattr(self.unfiltered(), 'dirstate')
1310 delattr(self.unfiltered(), 'dirstate')
1309
1311
1310 def invalidate(self, clearfilecache=False):
1312 def invalidate(self, clearfilecache=False):
1311 '''Invalidates both store and non-store parts other than dirstate
1313 '''Invalidates both store and non-store parts other than dirstate
1312
1314
1313 If a transaction is running, invalidation of store is omitted,
1315 If a transaction is running, invalidation of store is omitted,
1314 because discarding in-memory changes might cause inconsistency
1316 because discarding in-memory changes might cause inconsistency
1315 (e.g. incomplete fncache causes unintentional failure, but
1317 (e.g. incomplete fncache causes unintentional failure, but
1316 redundant one doesn't).
1318 redundant one doesn't).
1317 '''
1319 '''
1318 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1320 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1319 for k in list(self._filecache.keys()):
1321 for k in list(self._filecache.keys()):
1320 # dirstate is invalidated separately in invalidatedirstate()
1322 # dirstate is invalidated separately in invalidatedirstate()
1321 if k == 'dirstate':
1323 if k == 'dirstate':
1322 continue
1324 continue
1323
1325
1324 if clearfilecache:
1326 if clearfilecache:
1325 del self._filecache[k]
1327 del self._filecache[k]
1326 try:
1328 try:
1327 delattr(unfiltered, k)
1329 delattr(unfiltered, k)
1328 except AttributeError:
1330 except AttributeError:
1329 pass
1331 pass
1330 self.invalidatecaches()
1332 self.invalidatecaches()
1331 if not self.currenttransaction():
1333 if not self.currenttransaction():
1332 # TODO: Changing contents of store outside transaction
1334 # TODO: Changing contents of store outside transaction
1333 # causes inconsistency. We should make in-memory store
1335 # causes inconsistency. We should make in-memory store
1334 # changes detectable, and abort if changed.
1336 # changes detectable, and abort if changed.
1335 self.store.invalidatecaches()
1337 self.store.invalidatecaches()
1336
1338
1337 def invalidateall(self):
1339 def invalidateall(self):
1338 '''Fully invalidates both store and non-store parts, causing the
1340 '''Fully invalidates both store and non-store parts, causing the
1339 subsequent operation to reread any outside changes.'''
1341 subsequent operation to reread any outside changes.'''
1340 # extension should hook this to invalidate its caches
1342 # extension should hook this to invalidate its caches
1341 self.invalidate()
1343 self.invalidate()
1342 self.invalidatedirstate()
1344 self.invalidatedirstate()
1343
1345
1344 @unfilteredmethod
1346 @unfilteredmethod
1345 def _refreshfilecachestats(self, tr):
1347 def _refreshfilecachestats(self, tr):
1346 """Reload stats of cached files so that they are flagged as valid"""
1348 """Reload stats of cached files so that they are flagged as valid"""
1347 for k, ce in self._filecache.items():
1349 for k, ce in self._filecache.items():
1348 if k == 'dirstate' or k not in self.__dict__:
1350 if k == 'dirstate' or k not in self.__dict__:
1349 continue
1351 continue
1350 ce.refresh()
1352 ce.refresh()
1351
1353
1352 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1354 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1353 inheritchecker=None, parentenvvar=None):
1355 inheritchecker=None, parentenvvar=None):
1354 parentlock = None
1356 parentlock = None
1355 # the contents of parentenvvar are used by the underlying lock to
1357 # the contents of parentenvvar are used by the underlying lock to
1356 # determine whether it can be inherited
1358 # determine whether it can be inherited
1357 if parentenvvar is not None:
1359 if parentenvvar is not None:
1358 parentlock = encoding.environ.get(parentenvvar)
1360 parentlock = encoding.environ.get(parentenvvar)
1359 try:
1361 try:
1360 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1362 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1361 acquirefn=acquirefn, desc=desc,
1363 acquirefn=acquirefn, desc=desc,
1362 inheritchecker=inheritchecker,
1364 inheritchecker=inheritchecker,
1363 parentlock=parentlock)
1365 parentlock=parentlock)
1364 except error.LockHeld as inst:
1366 except error.LockHeld as inst:
1365 if not wait:
1367 if not wait:
1366 raise
1368 raise
1367 # show more details for new-style locks
1369 # show more details for new-style locks
1368 if ':' in inst.locker:
1370 if ':' in inst.locker:
1369 host, pid = inst.locker.split(":", 1)
1371 host, pid = inst.locker.split(":", 1)
1370 self.ui.warn(
1372 self.ui.warn(
1371 _("waiting for lock on %s held by process %r "
1373 _("waiting for lock on %s held by process %r "
1372 "on host %r\n") % (desc, pid, host))
1374 "on host %r\n") % (desc, pid, host))
1373 else:
1375 else:
1374 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1376 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1375 (desc, inst.locker))
1377 (desc, inst.locker))
1376 # default to 600 seconds timeout
1378 # default to 600 seconds timeout
1377 l = lockmod.lock(vfs, lockname,
1379 l = lockmod.lock(vfs, lockname,
1378 int(self.ui.config("ui", "timeout", "600")),
1380 int(self.ui.config("ui", "timeout", "600")),
1379 releasefn=releasefn, acquirefn=acquirefn,
1381 releasefn=releasefn, acquirefn=acquirefn,
1380 desc=desc)
1382 desc=desc)
1381 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1383 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1382 return l
1384 return l
1383
1385
1384 def _afterlock(self, callback):
1386 def _afterlock(self, callback):
1385 """add a callback to be run when the repository is fully unlocked
1387 """add a callback to be run when the repository is fully unlocked
1386
1388
1387 The callback will be executed when the outermost lock is released
1389 The callback will be executed when the outermost lock is released
1388 (with wlock being higher level than 'lock')."""
1390 (with wlock being higher level than 'lock')."""
1389 for ref in (self._wlockref, self._lockref):
1391 for ref in (self._wlockref, self._lockref):
1390 l = ref and ref()
1392 l = ref and ref()
1391 if l and l.held:
1393 if l and l.held:
1392 l.postrelease.append(callback)
1394 l.postrelease.append(callback)
1393 break
1395 break
1394 else: # no lock has been found.
1396 else: # no lock has been found.
1395 callback()
1397 callback()
1396
1398
1397 def lock(self, wait=True):
1399 def lock(self, wait=True):
1398 '''Lock the repository store (.hg/store) and return the lock.
1400 '''Lock the repository store (.hg/store) and return the lock.
1399 Use this before modifying the store (e.g. committing or
1401 Use this before modifying the store (e.g. committing or
1400 stripping). If you are opening a transaction, get a lock as well.
1402 stripping). If you are opening a transaction, get a lock as well.
1401
1403
1402 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1404 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1403 'wlock' first to avoid a dead-lock hazard.'''
1405 'wlock' first to avoid a dead-lock hazard.'''
1404 l = self._currentlock(self._lockref)
1406 l = self._currentlock(self._lockref)
1405 if l is not None:
1407 if l is not None:
1406 l.lock()
1408 l.lock()
1407 return l
1409 return l
1408
1410
1409 l = self._lock(self.svfs, "lock", wait, None,
1411 l = self._lock(self.svfs, "lock", wait, None,
1410 self.invalidate, _('repository %s') % self.origroot)
1412 self.invalidate, _('repository %s') % self.origroot)
1411 self._lockref = weakref.ref(l)
1413 self._lockref = weakref.ref(l)
1412 return l
1414 return l
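# Usage sketch (illustrative only): when both locks are needed, take the
# working-directory lock first, as the docstring requires (rollback() above
# follows the same pattern):
#
#   wlock = lock = None
#   try:
#       wlock = repo.wlock()
#       lock = repo.lock()
#       # ... modify the working directory and the store ...
#   finally:
#       release(lock, wlock)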
1413
1415
1414 def _wlockchecktransaction(self):
1416 def _wlockchecktransaction(self):
1415 if self.currenttransaction() is not None:
1417 if self.currenttransaction() is not None:
1416 raise error.LockInheritanceContractViolation(
1418 raise error.LockInheritanceContractViolation(
1417 'wlock cannot be inherited in the middle of a transaction')
1419 'wlock cannot be inherited in the middle of a transaction')
1418
1420
1419 def wlock(self, wait=True):
1421 def wlock(self, wait=True):
1420 '''Lock the non-store parts of the repository (everything under
1422 '''Lock the non-store parts of the repository (everything under
1421 .hg except .hg/store) and return the lock.
1423 .hg except .hg/store) and return the lock.
1422
1424
1423 Use this before modifying files in .hg.
1425 Use this before modifying files in .hg.
1424
1426
1425 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1427 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1426 'wlock' first to avoid a dead-lock hazard.'''
1428 'wlock' first to avoid a dead-lock hazard.'''
1427 l = self._wlockref and self._wlockref()
1429 l = self._wlockref and self._wlockref()
1428 if l is not None and l.held:
1430 if l is not None and l.held:
1429 l.lock()
1431 l.lock()
1430 return l
1432 return l
1431
1433
1432 # We do not need to check for non-waiting lock acquisition. Such
1434 # We do not need to check for non-waiting lock acquisition. Such
1433 # acquisition would not cause a dead-lock as it would just fail.
1435 # acquisition would not cause a dead-lock as it would just fail.
1434 if wait and (self.ui.configbool('devel', 'all-warnings')
1436 if wait and (self.ui.configbool('devel', 'all-warnings')
1435 or self.ui.configbool('devel', 'check-locks')):
1437 or self.ui.configbool('devel', 'check-locks')):
1436 if self._currentlock(self._lockref) is not None:
1438 if self._currentlock(self._lockref) is not None:
1437 self.ui.develwarn('"wlock" acquired after "lock"')
1439 self.ui.develwarn('"wlock" acquired after "lock"')
1438
1440
1439 def unlock():
1441 def unlock():
1440 if self.dirstate.pendingparentchange():
1442 if self.dirstate.pendingparentchange():
1441 self.dirstate.invalidate()
1443 self.dirstate.invalidate()
1442 else:
1444 else:
1443 self.dirstate.write(None)
1445 self.dirstate.write(None)
1444
1446
1445 self._filecache['dirstate'].refresh()
1447 self._filecache['dirstate'].refresh()
1446
1448
1447 l = self._lock(self.vfs, "wlock", wait, unlock,
1449 l = self._lock(self.vfs, "wlock", wait, unlock,
1448 self.invalidatedirstate, _('working directory of %s') %
1450 self.invalidatedirstate, _('working directory of %s') %
1449 self.origroot,
1451 self.origroot,
1450 inheritchecker=self._wlockchecktransaction,
1452 inheritchecker=self._wlockchecktransaction,
1451 parentenvvar='HG_WLOCK_LOCKER')
1453 parentenvvar='HG_WLOCK_LOCKER')
1452 self._wlockref = weakref.ref(l)
1454 self._wlockref = weakref.ref(l)
1453 return l
1455 return l
1454
1456
1455 def _currentlock(self, lockref):
1457 def _currentlock(self, lockref):
1456 """Returns the lock if it's held, or None if it's not."""
1458 """Returns the lock if it's held, or None if it's not."""
1457 if lockref is None:
1459 if lockref is None:
1458 return None
1460 return None
1459 l = lockref()
1461 l = lockref()
1460 if l is None or not l.held:
1462 if l is None or not l.held:
1461 return None
1463 return None
1462 return l
1464 return l
1463
1465
1464 def currentwlock(self):
1466 def currentwlock(self):
1465 """Returns the wlock if it's held, or None if it's not."""
1467 """Returns the wlock if it's held, or None if it's not."""
1466 return self._currentlock(self._wlockref)
1468 return self._currentlock(self._wlockref)
1467
1469
1468 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1470 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1469 """
1471 """
1470 commit an individual file as part of a larger transaction
1472 commit an individual file as part of a larger transaction
1471 """
1473 """
1472
1474
1473 fname = fctx.path()
1475 fname = fctx.path()
1474 fparent1 = manifest1.get(fname, nullid)
1476 fparent1 = manifest1.get(fname, nullid)
1475 fparent2 = manifest2.get(fname, nullid)
1477 fparent2 = manifest2.get(fname, nullid)
1476 if isinstance(fctx, context.filectx):
1478 if isinstance(fctx, context.filectx):
1477 node = fctx.filenode()
1479 node = fctx.filenode()
1478 if node in [fparent1, fparent2]:
1480 if node in [fparent1, fparent2]:
1479 self.ui.debug('reusing %s filelog entry\n' % fname)
1481 self.ui.debug('reusing %s filelog entry\n' % fname)
1480 if manifest1.flags(fname) != fctx.flags():
1482 if manifest1.flags(fname) != fctx.flags():
1481 changelist.append(fname)
1483 changelist.append(fname)
1482 return node
1484 return node
1483
1485
1484 flog = self.file(fname)
1486 flog = self.file(fname)
1485 meta = {}
1487 meta = {}
1486 copy = fctx.renamed()
1488 copy = fctx.renamed()
1487 if copy and copy[0] != fname:
1489 if copy and copy[0] != fname:
1488 # Mark the new revision of this file as a copy of another
1490 # Mark the new revision of this file as a copy of another
1489 # file. This copy data will effectively act as a parent
1491 # file. This copy data will effectively act as a parent
1490 # of this new revision. If this is a merge, the first
1492 # of this new revision. If this is a merge, the first
1491 # parent will be the nullid (meaning "look up the copy data")
1493 # parent will be the nullid (meaning "look up the copy data")
1492 # and the second one will be the other parent. For example:
1494 # and the second one will be the other parent. For example:
1493 #
1495 #
1494 # 0 --- 1 --- 3 rev1 changes file foo
1496 # 0 --- 1 --- 3 rev1 changes file foo
1495 # \ / rev2 renames foo to bar and changes it
1497 # \ / rev2 renames foo to bar and changes it
1496 # \- 2 -/ rev3 should have bar with all changes and
1498 # \- 2 -/ rev3 should have bar with all changes and
1497 # should record that bar descends from
1499 # should record that bar descends from
1498 # bar in rev2 and foo in rev1
1500 # bar in rev2 and foo in rev1
1499 #
1501 #
1500 # this allows this merge to succeed:
1502 # this allows this merge to succeed:
1501 #
1503 #
1502 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1504 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1503 # \ / merging rev3 and rev4 should use bar@rev2
1505 # \ / merging rev3 and rev4 should use bar@rev2
1504 # \- 2 --- 4 as the merge base
1506 # \- 2 --- 4 as the merge base
1505 #
1507 #
1506
1508
1507 cfname = copy[0]
1509 cfname = copy[0]
1508 crev = manifest1.get(cfname)
1510 crev = manifest1.get(cfname)
1509 newfparent = fparent2
1511 newfparent = fparent2
1510
1512
1511 if manifest2: # branch merge
1513 if manifest2: # branch merge
1512 if fparent2 == nullid or crev is None: # copied on remote side
1514 if fparent2 == nullid or crev is None: # copied on remote side
1513 if cfname in manifest2:
1515 if cfname in manifest2:
1514 crev = manifest2[cfname]
1516 crev = manifest2[cfname]
1515 newfparent = fparent1
1517 newfparent = fparent1
1516
1518
1517 # Here, we used to search backwards through history to try to find
1519 # Here, we used to search backwards through history to try to find
1518 # where the file copy came from if the source of a copy was not in
1520 # where the file copy came from if the source of a copy was not in
1519 # the parent directory. However, this doesn't actually make sense to
1521 # the parent directory. However, this doesn't actually make sense to
1520 # do (what does a copy from something not in your working copy even
1522 # do (what does a copy from something not in your working copy even
1521 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1523 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1522 # the user that copy information was dropped, so if they didn't
1524 # the user that copy information was dropped, so if they didn't
1523 # expect this outcome it can be fixed, but this is the correct
1525 # expect this outcome it can be fixed, but this is the correct
1524 # behavior in this circumstance.
1526 # behavior in this circumstance.
1525
1527
1526 if crev:
1528 if crev:
1527 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1529 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1528 meta["copy"] = cfname
1530 meta["copy"] = cfname
1529 meta["copyrev"] = hex(crev)
1531 meta["copyrev"] = hex(crev)
1530 fparent1, fparent2 = nullid, newfparent
1532 fparent1, fparent2 = nullid, newfparent
1531 else:
1533 else:
1532 self.ui.warn(_("warning: can't find ancestor for '%s' "
1534 self.ui.warn(_("warning: can't find ancestor for '%s' "
1533 "copied from '%s'!\n") % (fname, cfname))
1535 "copied from '%s'!\n") % (fname, cfname))
1534
1536
1535 elif fparent1 == nullid:
1537 elif fparent1 == nullid:
1536 fparent1, fparent2 = fparent2, nullid
1538 fparent1, fparent2 = fparent2, nullid
1537 elif fparent2 != nullid:
1539 elif fparent2 != nullid:
1538 # is one parent an ancestor of the other?
1540 # is one parent an ancestor of the other?
1539 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1541 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1540 if fparent1 in fparentancestors:
1542 if fparent1 in fparentancestors:
1541 fparent1, fparent2 = fparent2, nullid
1543 fparent1, fparent2 = fparent2, nullid
1542 elif fparent2 in fparentancestors:
1544 elif fparent2 in fparentancestors:
1543 fparent2 = nullid
1545 fparent2 = nullid
1544
1546
1545 # is the file changed?
1547 # is the file changed?
1546 text = fctx.data()
1548 text = fctx.data()
1547 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1549 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1548 changelist.append(fname)
1550 changelist.append(fname)
1549 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1551 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1550 # are just the flags changed during merge?
1552 # are just the flags changed during merge?
1551 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1553 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1552 changelist.append(fname)
1554 changelist.append(fname)
1553
1555
1554 return fparent1
1556 return fparent1
1555
1557
1556 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1558 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1557 """check for commit arguments that aren't committable"""
1559 """check for commit arguments that aren't committable"""
1558 if match.isexact() or match.prefix():
1560 if match.isexact() or match.prefix():
1559 matched = set(status.modified + status.added + status.removed)
1561 matched = set(status.modified + status.added + status.removed)
1560
1562
1561 for f in match.files():
1563 for f in match.files():
1562 f = self.dirstate.normalize(f)
1564 f = self.dirstate.normalize(f)
1563 if f == '.' or f in matched or f in wctx.substate:
1565 if f == '.' or f in matched or f in wctx.substate:
1564 continue
1566 continue
1565 if f in status.deleted:
1567 if f in status.deleted:
1566 fail(f, _('file not found!'))
1568 fail(f, _('file not found!'))
1567 if f in vdirs: # visited directory
1569 if f in vdirs: # visited directory
1568 d = f + '/'
1570 d = f + '/'
1569 for mf in matched:
1571 for mf in matched:
1570 if mf.startswith(d):
1572 if mf.startswith(d):
1571 break
1573 break
1572 else:
1574 else:
1573 fail(f, _("no match under directory!"))
1575 fail(f, _("no match under directory!"))
1574 elif f not in self.dirstate:
1576 elif f not in self.dirstate:
1575 fail(f, _("file not tracked!"))
1577 fail(f, _("file not tracked!"))
1576
1578
1577 @unfilteredmethod
1579 @unfilteredmethod
1578 def commit(self, text="", user=None, date=None, match=None, force=False,
1580 def commit(self, text="", user=None, date=None, match=None, force=False,
1579 editor=False, extra=None):
1581 editor=False, extra=None):
1580 """Add a new revision to current repository.
1582 """Add a new revision to current repository.
1581
1583
1582 Revision information is gathered from the working directory,
1584 Revision information is gathered from the working directory,
1583 match can be used to filter the committed files. If editor is
1585 match can be used to filter the committed files. If editor is
1584 supplied, it is called to get a commit message.
1586 supplied, it is called to get a commit message.
1585 """
1587 """
1586 if extra is None:
1588 if extra is None:
1587 extra = {}
1589 extra = {}
1588
1590
1589 def fail(f, msg):
1591 def fail(f, msg):
1590 raise error.Abort('%s: %s' % (f, msg))
1592 raise error.Abort('%s: %s' % (f, msg))
1591
1593
1592 if not match:
1594 if not match:
1593 match = matchmod.always(self.root, '')
1595 match = matchmod.always(self.root, '')
1594
1596
1595 if not force:
1597 if not force:
1596 vdirs = []
1598 vdirs = []
1597 match.explicitdir = vdirs.append
1599 match.explicitdir = vdirs.append
1598 match.bad = fail
1600 match.bad = fail
1599
1601
1600 wlock = lock = tr = None
1602 wlock = lock = tr = None
1601 try:
1603 try:
1602 wlock = self.wlock()
1604 wlock = self.wlock()
1603 lock = self.lock() # for recent changelog (see issue4368)
1605 lock = self.lock() # for recent changelog (see issue4368)
1604
1606
1605 wctx = self[None]
1607 wctx = self[None]
1606 merge = len(wctx.parents()) > 1
1608 merge = len(wctx.parents()) > 1
1607
1609
1608 if not force and merge and match.ispartial():
1610 if not force and merge and match.ispartial():
1609 raise error.Abort(_('cannot partially commit a merge '
1611 raise error.Abort(_('cannot partially commit a merge '
1610 '(do not specify files or patterns)'))
1612 '(do not specify files or patterns)'))
1611
1613
1612 status = self.status(match=match, clean=force)
1614 status = self.status(match=match, clean=force)
1613 if force:
1615 if force:
1614 status.modified.extend(status.clean) # mq may commit clean files
1616 status.modified.extend(status.clean) # mq may commit clean files
1615
1617
1616 # check subrepos
1618 # check subrepos
1617 subs = []
1619 subs = []
1618 commitsubs = set()
1620 commitsubs = set()
1619 newstate = wctx.substate.copy()
1621 newstate = wctx.substate.copy()
1620 # only manage subrepos and .hgsubstate if .hgsub is present
1622 # only manage subrepos and .hgsubstate if .hgsub is present
1621 if '.hgsub' in wctx:
1623 if '.hgsub' in wctx:
1622 # we'll decide whether to track this ourselves, thanks
1624 # we'll decide whether to track this ourselves, thanks
1623 for c in status.modified, status.added, status.removed:
1625 for c in status.modified, status.added, status.removed:
1624 if '.hgsubstate' in c:
1626 if '.hgsubstate' in c:
1625 c.remove('.hgsubstate')
1627 c.remove('.hgsubstate')
1626
1628
1627 # compare current state to last committed state
1629 # compare current state to last committed state
1628 # build new substate based on last committed state
1630 # build new substate based on last committed state
1629 oldstate = wctx.p1().substate
1631 oldstate = wctx.p1().substate
1630 for s in sorted(newstate.keys()):
1632 for s in sorted(newstate.keys()):
1631 if not match(s):
1633 if not match(s):
1632 # ignore working copy, use old state if present
1634 # ignore working copy, use old state if present
1633 if s in oldstate:
1635 if s in oldstate:
1634 newstate[s] = oldstate[s]
1636 newstate[s] = oldstate[s]
1635 continue
1637 continue
1636 if not force:
1638 if not force:
1637 raise error.Abort(
1639 raise error.Abort(
1638 _("commit with new subrepo %s excluded") % s)
1640 _("commit with new subrepo %s excluded") % s)
1639 dirtyreason = wctx.sub(s).dirtyreason(True)
1641 dirtyreason = wctx.sub(s).dirtyreason(True)
1640 if dirtyreason:
1642 if dirtyreason:
1641 if not self.ui.configbool('ui', 'commitsubrepos'):
1643 if not self.ui.configbool('ui', 'commitsubrepos'):
1642 raise error.Abort(dirtyreason,
1644 raise error.Abort(dirtyreason,
1643 hint=_("use --subrepos for recursive commit"))
1645 hint=_("use --subrepos for recursive commit"))
1644 subs.append(s)
1646 subs.append(s)
1645 commitsubs.add(s)
1647 commitsubs.add(s)
1646 else:
1648 else:
1647 bs = wctx.sub(s).basestate()
1649 bs = wctx.sub(s).basestate()
1648 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1650 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1649 if oldstate.get(s, (None, None, None))[1] != bs:
1651 if oldstate.get(s, (None, None, None))[1] != bs:
1650 subs.append(s)
1652 subs.append(s)
1651
1653
1652 # check for removed subrepos
1654 # check for removed subrepos
1653 for p in wctx.parents():
1655 for p in wctx.parents():
1654 r = [s for s in p.substate if s not in newstate]
1656 r = [s for s in p.substate if s not in newstate]
1655 subs += [s for s in r if match(s)]
1657 subs += [s for s in r if match(s)]
1656 if subs:
1658 if subs:
1657 if (not match('.hgsub') and
1659 if (not match('.hgsub') and
1658 '.hgsub' in (wctx.modified() + wctx.added())):
1660 '.hgsub' in (wctx.modified() + wctx.added())):
1659 raise error.Abort(
1661 raise error.Abort(
1660 _("can't commit subrepos without .hgsub"))
1662 _("can't commit subrepos without .hgsub"))
1661 status.modified.insert(0, '.hgsubstate')
1663 status.modified.insert(0, '.hgsubstate')
1662
1664
1663 elif '.hgsub' in status.removed:
1665 elif '.hgsub' in status.removed:
1664 # clean up .hgsubstate when .hgsub is removed
1666 # clean up .hgsubstate when .hgsub is removed
1665 if ('.hgsubstate' in wctx and
1667 if ('.hgsubstate' in wctx and
1666 '.hgsubstate' not in (status.modified + status.added +
1668 '.hgsubstate' not in (status.modified + status.added +
1667 status.removed)):
1669 status.removed)):
1668 status.removed.insert(0, '.hgsubstate')
1670 status.removed.insert(0, '.hgsubstate')
1669
1671
1670 # make sure all explicit patterns are matched
1672 # make sure all explicit patterns are matched
1671 if not force:
1673 if not force:
1672 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1674 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1673
1675
1674 cctx = context.workingcommitctx(self, status,
1676 cctx = context.workingcommitctx(self, status,
1675 text, user, date, extra)
1677 text, user, date, extra)
1676
1678
1677 # internal config: ui.allowemptycommit
1679 # internal config: ui.allowemptycommit
1678 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1680 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1679 or extra.get('close') or merge or cctx.files()
1681 or extra.get('close') or merge or cctx.files()
1680 or self.ui.configbool('ui', 'allowemptycommit'))
1682 or self.ui.configbool('ui', 'allowemptycommit'))
1681 if not allowemptycommit:
1683 if not allowemptycommit:
1682 return None
1684 return None
1683
1685
1684 if merge and cctx.deleted():
1686 if merge and cctx.deleted():
1685 raise error.Abort(_("cannot commit merge with missing files"))
1687 raise error.Abort(_("cannot commit merge with missing files"))
1686
1688
1687 ms = mergemod.mergestate.read(self)
1689 ms = mergemod.mergestate.read(self)
1688 mergeutil.checkunresolved(ms)
1690 mergeutil.checkunresolved(ms)
1689
1691
1690 if editor:
1692 if editor:
1691 cctx._text = editor(self, cctx, subs)
1693 cctx._text = editor(self, cctx, subs)
1692 edited = (text != cctx._text)
1694 edited = (text != cctx._text)
1693
1695
1694 # Save commit message in case this transaction gets rolled back
1696 # Save commit message in case this transaction gets rolled back
1695 # (e.g. by a pretxncommit hook). Leave the content alone on
1697 # (e.g. by a pretxncommit hook). Leave the content alone on
1696 # the assumption that the user will use the same editor again.
1698 # the assumption that the user will use the same editor again.
1697 msgfn = self.savecommitmessage(cctx._text)
1699 msgfn = self.savecommitmessage(cctx._text)
1698
1700
1699 # commit subs and write new state
1701 # commit subs and write new state
1700 if subs:
1702 if subs:
1701 for s in sorted(commitsubs):
1703 for s in sorted(commitsubs):
1702 sub = wctx.sub(s)
1704 sub = wctx.sub(s)
1703 self.ui.status(_('committing subrepository %s\n') %
1705 self.ui.status(_('committing subrepository %s\n') %
1704 subrepo.subrelpath(sub))
1706 subrepo.subrelpath(sub))
1705 sr = sub.commit(cctx._text, user, date)
1707 sr = sub.commit(cctx._text, user, date)
1706 newstate[s] = (newstate[s][0], sr)
1708 newstate[s] = (newstate[s][0], sr)
1707 subrepo.writestate(self, newstate)
1709 subrepo.writestate(self, newstate)
1708
1710
1709 p1, p2 = self.dirstate.parents()
1711 p1, p2 = self.dirstate.parents()
1710 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1712 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1711 try:
1713 try:
1712 self.hook("precommit", throw=True, parent1=hookp1,
1714 self.hook("precommit", throw=True, parent1=hookp1,
1713 parent2=hookp2)
1715 parent2=hookp2)
1714 tr = self.transaction('commit')
1716 tr = self.transaction('commit')
1715 ret = self.commitctx(cctx, True)
1717 ret = self.commitctx(cctx, True)
1716 except: # re-raises
1718 except: # re-raises
1717 if edited:
1719 if edited:
1718 self.ui.write(
1720 self.ui.write(
1719 _('note: commit message saved in %s\n') % msgfn)
1721 _('note: commit message saved in %s\n') % msgfn)
1720 raise
1722 raise
1721 # update bookmarks, dirstate and mergestate
1723 # update bookmarks, dirstate and mergestate
1722 bookmarks.update(self, [p1, p2], ret)
1724 bookmarks.update(self, [p1, p2], ret)
1723 cctx.markcommitted(ret)
1725 cctx.markcommitted(ret)
1724 ms.reset()
1726 ms.reset()
1725 tr.close()
1727 tr.close()
1726
1728
1727 finally:
1729 finally:
1728 lockmod.release(tr, lock, wlock)
1730 lockmod.release(tr, lock, wlock)
1729
1731
1730 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1732 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1731 # hack for commands that use a temporary commit (e.g. histedit):
1733 # hack for commands that use a temporary commit (e.g. histedit):
1732 # the temporary commit may have been stripped before the hook is released
1734 # the temporary commit may have been stripped before the hook is released
1733 if self.changelog.hasnode(ret):
1735 if self.changelog.hasnode(ret):
1734 self.hook("commit", node=node, parent1=parent1,
1736 self.hook("commit", node=node, parent1=parent1,
1735 parent2=parent2)
1737 parent2=parent2)
1736 self._afterlock(commithook)
1738 self._afterlock(commithook)
1737 return ret
1739 return ret
1738
1740
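    # The hook names fired above ("precommit", "pretxncommit", "commit") are
    # standard Mercurial hook points. As a rough usage sketch (the hook names
    # are real, the script path and function are made up), a repository's
    # hgrc could wire one up like this:
    #
    #   [hooks]
    #   pretxncommit.checkmessage = python:/path/to/hooks.py:checkmessage
    #
    # where checkmessage(ui, repo, node, **kwargs) returning a true value
    # aborts the commit before the transaction is closed.
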
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # put the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway.
                #
                # if the minimal phase is 0, we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

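    # A minimal sketch of driving commitctx() directly with an in-memory
    # context (not part of this module; the path, contents and message below
    # are made-up examples):
    #
    #   from mercurial import context
    #
    #   def getfilectx(repo, memctx, path):
    #       # returning None here would mark the file as removed
    #       return context.memfilectx(repo, path, 'new contents\n')
    #
    #   mctx = context.memctx(repo, (repo['.'].node(), None),
    #                         'example commit message',
    #                         ['hello.txt'], getfilectx, user='alice')
    #   newnode = repo.commitctx(mctx)
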
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration, this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

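    # Rough usage sketch (not part of this module; the pattern is a made-up
    # example): build a matcher for the working directory context and walk it.
    #
    #   from mercurial import scmutil
    #
    #   m = scmutil.match(repo[None], ['glob:**.py'])
    #   for path in repo.walk(m):
    #       repo.ui.write(path + '\n')
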
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

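    # Usage sketch (not part of this module): the returned status object
    # behaves like a tuple of file lists and also exposes named fields.
    #
    #   st = repo.status(unknown=True)
    #   modified, added, removed = st.modified, st.added, st.removed
    #   # or: modified, added, removed, deleted, unknown, ignored, clean = st
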
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

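    # Usage sketch (not part of this module): both methods return binary
    # node ids, newest first, so for example
    #
    #   [short(n) for n in repo.heads()]                 # all repo heads
    #   [short(n) for n in repo.branchheads('default')]  # open 'default' heads
    #
    # yields short hex hashes via the `short` helper already imported above.
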
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

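    # Note (not part of the original comments): for each (top, bottom) pair,
    # the loop above records the first-parent ancestors of `top` that sit at
    # exponentially growing distances 1, 2, 4, 8, ... from it, stopping at
    # `bottom` or the null revision. For a simple linear history
    # 10 -> 9 -> ... -> 0, between([(node(10), node(0))]) would therefore
    # return the nodes of revisions 9, 8, 6 and 2.
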
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of functions that are called with a
        pushop (carrying repo, remote and outgoing) before changesets are
        pushed.
        """
        return util.hooks()

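    # Sketch of how an extension might use this (the extension name and the
    # checking function are made up; util.hooks.add() is the registration
    # call assumed here):
    #
    #   def prepush(pushop):
    #       if pushop.outgoing.missing:
    #           pushop.repo.ui.status('pushing %d changesets\n'
    #                                 % len(pushop.outgoing.missing))
    #
    #   def reposetup(ui, repo):
    #       repo.prepushoutgoinghooks.add('myextension', prepush)
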
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

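    # Usage sketch (not part of this module): 'bookmarks' and 'phases' are
    # standard pushkey namespaces; the bookmark name below is a made-up
    # example.
    #
    #   marks = repo.listkeys('bookmarks')             # {name: hex node}
    #   ok = repo.pushkey('bookmarks', 'feature-x', '', repo['tip'].hex())
    #
    # pushkey() returns False (instead of raising) when a prepushkey hook
    # aborts the update.
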
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

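    # Usage sketch (not part of this module): the message is written through
    # self.vfs, i.e. to .hg/last-message.txt, and the return value is that
    # path made relative to the current working directory, e.g.
    #
    #   msgpath = repo.savecommitmessage('WIP: example message\n')
    #   repo.ui.status('message saved in %s\n' % msgpath)
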
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                # if src and dest refer to the same file, vfs.rename is a
                # no-op, leaving both src and dest on disk. delete dest to
                # make sure the rename couldn't be such a no-op.
                vfs.unlink(dest)
            except OSError as ex:
                if ex.errno != errno.ENOENT:
                    raise
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

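# For example (illustration only), undoname('journal.dirstate') returns
# 'undo.dirstate', and undoname('store/journal') returns 'store/undo':
# aftertrans() and undoname() together turn a transaction's journal files
# into the corresponding undo files once the transaction has closed.
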
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
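
# Sketch of how an extension could wrap this function (the extension and
# requirement names are made up; extensions.wrapfunction is the assumed
# wrapping API):
#
#   from mercurial import extensions, localrepo
#
#   def wrappedrequirements(orig, repo):
#       reqs = orig(repo)
#       reqs.add('exp-myextension-format')
#       return reqs
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               wrappedrequirements)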