##// END OF EJS Templates
localrepo: drop force check from checkcommitpatterns...
timeless -
r28814:1f65f291 default
parent child Browse files
Show More
@@ -1,1983 +1,1982 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import inspect
11 import inspect
12 import os
12 import os
13 import random
13 import random
14 import time
14 import time
15 import urllib
15 import urllib
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 cmdutil,
31 cmdutil,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 encoding,
34 encoding,
35 error,
35 error,
36 exchange,
36 exchange,
37 extensions,
37 extensions,
38 filelog,
38 filelog,
39 hook,
39 hook,
40 lock as lockmod,
40 lock as lockmod,
41 manifest,
41 manifest,
42 match as matchmod,
42 match as matchmod,
43 merge as mergemod,
43 merge as mergemod,
44 namespaces,
44 namespaces,
45 obsolete,
45 obsolete,
46 pathutil,
46 pathutil,
47 peer,
47 peer,
48 phases,
48 phases,
49 pushkey,
49 pushkey,
50 repoview,
50 repoview,
51 revset,
51 revset,
52 scmutil,
52 scmutil,
53 store,
53 store,
54 subrepo,
54 subrepo,
55 tags as tagsmod,
55 tags as tagsmod,
56 transaction,
56 transaction,
57 util,
57 util,
58 )
58 )
59
59
60 release = lockmod.release
60 release = lockmod.release
61 propertycache = util.propertycache
61 propertycache = util.propertycache
62 filecache = scmutil.filecache
62 filecache = scmutil.filecache
63
63
64 class repofilecache(filecache):
64 class repofilecache(filecache):
65 """All filecache usage on repo are done for logic that should be unfiltered
65 """All filecache usage on repo are done for logic that should be unfiltered
66 """
66 """
67
67
68 def __get__(self, repo, type=None):
68 def __get__(self, repo, type=None):
69 return super(repofilecache, self).__get__(repo.unfiltered(), type)
69 return super(repofilecache, self).__get__(repo.unfiltered(), type)
70 def __set__(self, repo, value):
70 def __set__(self, repo, value):
71 return super(repofilecache, self).__set__(repo.unfiltered(), value)
71 return super(repofilecache, self).__set__(repo.unfiltered(), value)
72 def __delete__(self, repo):
72 def __delete__(self, repo):
73 return super(repofilecache, self).__delete__(repo.unfiltered())
73 return super(repofilecache, self).__delete__(repo.unfiltered())
74
74
75 class storecache(repofilecache):
75 class storecache(repofilecache):
76 """filecache for files in the store"""
76 """filecache for files in the store"""
77 def join(self, obj, fname):
77 def join(self, obj, fname):
78 return obj.sjoin(fname)
78 return obj.sjoin(fname)
79
79
80 class unfilteredpropertycache(propertycache):
80 class unfilteredpropertycache(propertycache):
81 """propertycache that apply to unfiltered repo only"""
81 """propertycache that apply to unfiltered repo only"""
82
82
83 def __get__(self, repo, type=None):
83 def __get__(self, repo, type=None):
84 unfi = repo.unfiltered()
84 unfi = repo.unfiltered()
85 if unfi is repo:
85 if unfi is repo:
86 return super(unfilteredpropertycache, self).__get__(unfi)
86 return super(unfilteredpropertycache, self).__get__(unfi)
87 return getattr(unfi, self.name)
87 return getattr(unfi, self.name)
88
88
89 class filteredpropertycache(propertycache):
89 class filteredpropertycache(propertycache):
90 """propertycache that must take filtering in account"""
90 """propertycache that must take filtering in account"""
91
91
92 def cachevalue(self, obj, value):
92 def cachevalue(self, obj, value):
93 object.__setattr__(obj, self.name, value)
93 object.__setattr__(obj, self.name, value)
94
94
95
95
96 def hasunfilteredcache(repo, name):
96 def hasunfilteredcache(repo, name):
97 """check if a repo has an unfilteredpropertycache value for <name>"""
97 """check if a repo has an unfilteredpropertycache value for <name>"""
98 return name in vars(repo.unfiltered())
98 return name in vars(repo.unfiltered())
99
99
100 def unfilteredmethod(orig):
100 def unfilteredmethod(orig):
101 """decorate method that always need to be run on unfiltered version"""
101 """decorate method that always need to be run on unfiltered version"""
102 def wrapper(repo, *args, **kwargs):
102 def wrapper(repo, *args, **kwargs):
103 return orig(repo.unfiltered(), *args, **kwargs)
103 return orig(repo.unfiltered(), *args, **kwargs)
104 return wrapper
104 return wrapper
105
105
106 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
106 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
107 'unbundle'))
107 'unbundle'))
108 legacycaps = moderncaps.union(set(['changegroupsubset']))
108 legacycaps = moderncaps.union(set(['changegroupsubset']))
109
109
110 class localpeer(peer.peerrepository):
110 class localpeer(peer.peerrepository):
111 '''peer for a local repo; reflects only the most recent API'''
111 '''peer for a local repo; reflects only the most recent API'''
112
112
113 def __init__(self, repo, caps=moderncaps):
113 def __init__(self, repo, caps=moderncaps):
114 peer.peerrepository.__init__(self)
114 peer.peerrepository.__init__(self)
115 self._repo = repo.filtered('served')
115 self._repo = repo.filtered('served')
116 self.ui = repo.ui
116 self.ui = repo.ui
117 self._caps = repo._restrictcapabilities(caps)
117 self._caps = repo._restrictcapabilities(caps)
118 self.requirements = repo.requirements
118 self.requirements = repo.requirements
119 self.supportedformats = repo.supportedformats
119 self.supportedformats = repo.supportedformats
120
120
121 def close(self):
121 def close(self):
122 self._repo.close()
122 self._repo.close()
123
123
124 def _capabilities(self):
124 def _capabilities(self):
125 return self._caps
125 return self._caps
126
126
127 def local(self):
127 def local(self):
128 return self._repo
128 return self._repo
129
129
130 def canpush(self):
130 def canpush(self):
131 return True
131 return True
132
132
133 def url(self):
133 def url(self):
134 return self._repo.url()
134 return self._repo.url()
135
135
136 def lookup(self, key):
136 def lookup(self, key):
137 return self._repo.lookup(key)
137 return self._repo.lookup(key)
138
138
139 def branchmap(self):
139 def branchmap(self):
140 return self._repo.branchmap()
140 return self._repo.branchmap()
141
141
142 def heads(self):
142 def heads(self):
143 return self._repo.heads()
143 return self._repo.heads()
144
144
145 def known(self, nodes):
145 def known(self, nodes):
146 return self._repo.known(nodes)
146 return self._repo.known(nodes)
147
147
148 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
148 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
149 **kwargs):
149 **kwargs):
150 cg = exchange.getbundle(self._repo, source, heads=heads,
150 cg = exchange.getbundle(self._repo, source, heads=heads,
151 common=common, bundlecaps=bundlecaps, **kwargs)
151 common=common, bundlecaps=bundlecaps, **kwargs)
152 if bundlecaps is not None and 'HG20' in bundlecaps:
152 if bundlecaps is not None and 'HG20' in bundlecaps:
153 # When requesting a bundle2, getbundle returns a stream to make the
153 # When requesting a bundle2, getbundle returns a stream to make the
154 # wire level function happier. We need to build a proper object
154 # wire level function happier. We need to build a proper object
155 # from it in local peer.
155 # from it in local peer.
156 cg = bundle2.getunbundler(self.ui, cg)
156 cg = bundle2.getunbundler(self.ui, cg)
157 return cg
157 return cg
158
158
159 # TODO We might want to move the next two calls into legacypeer and add
159 # TODO We might want to move the next two calls into legacypeer and add
160 # unbundle instead.
160 # unbundle instead.
161
161
162 def unbundle(self, cg, heads, url):
162 def unbundle(self, cg, heads, url):
163 """apply a bundle on a repo
163 """apply a bundle on a repo
164
164
165 This function handles the repo locking itself."""
165 This function handles the repo locking itself."""
166 try:
166 try:
167 try:
167 try:
168 cg = exchange.readbundle(self.ui, cg, None)
168 cg = exchange.readbundle(self.ui, cg, None)
169 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
169 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
170 if util.safehasattr(ret, 'getchunks'):
170 if util.safehasattr(ret, 'getchunks'):
171 # This is a bundle20 object, turn it into an unbundler.
171 # This is a bundle20 object, turn it into an unbundler.
172 # This little dance should be dropped eventually when the
172 # This little dance should be dropped eventually when the
173 # API is finally improved.
173 # API is finally improved.
174 stream = util.chunkbuffer(ret.getchunks())
174 stream = util.chunkbuffer(ret.getchunks())
175 ret = bundle2.getunbundler(self.ui, stream)
175 ret = bundle2.getunbundler(self.ui, stream)
176 return ret
176 return ret
177 except Exception as exc:
177 except Exception as exc:
178 # If the exception contains output salvaged from a bundle2
178 # If the exception contains output salvaged from a bundle2
179 # reply, we need to make sure it is printed before continuing
179 # reply, we need to make sure it is printed before continuing
180 # to fail. So we build a bundle2 with such output and consume
180 # to fail. So we build a bundle2 with such output and consume
181 # it directly.
181 # it directly.
182 #
182 #
183 # This is not very elegant but allows a "simple" solution for
183 # This is not very elegant but allows a "simple" solution for
184 # issue4594
184 # issue4594
185 output = getattr(exc, '_bundle2salvagedoutput', ())
185 output = getattr(exc, '_bundle2salvagedoutput', ())
186 if output:
186 if output:
187 bundler = bundle2.bundle20(self._repo.ui)
187 bundler = bundle2.bundle20(self._repo.ui)
188 for out in output:
188 for out in output:
189 bundler.addpart(out)
189 bundler.addpart(out)
190 stream = util.chunkbuffer(bundler.getchunks())
190 stream = util.chunkbuffer(bundler.getchunks())
191 b = bundle2.getunbundler(self.ui, stream)
191 b = bundle2.getunbundler(self.ui, stream)
192 bundle2.processbundle(self._repo, b)
192 bundle2.processbundle(self._repo, b)
193 raise
193 raise
194 except error.PushRaced as exc:
194 except error.PushRaced as exc:
195 raise error.ResponseError(_('push failed:'), str(exc))
195 raise error.ResponseError(_('push failed:'), str(exc))
196
196
197 def lock(self):
197 def lock(self):
198 return self._repo.lock()
198 return self._repo.lock()
199
199
200 def addchangegroup(self, cg, source, url):
200 def addchangegroup(self, cg, source, url):
201 return cg.apply(self._repo, source, url)
201 return cg.apply(self._repo, source, url)
202
202
203 def pushkey(self, namespace, key, old, new):
203 def pushkey(self, namespace, key, old, new):
204 return self._repo.pushkey(namespace, key, old, new)
204 return self._repo.pushkey(namespace, key, old, new)
205
205
206 def listkeys(self, namespace):
206 def listkeys(self, namespace):
207 return self._repo.listkeys(namespace)
207 return self._repo.listkeys(namespace)
208
208
209 def debugwireargs(self, one, two, three=None, four=None, five=None):
209 def debugwireargs(self, one, two, three=None, four=None, five=None):
210 '''used to test argument passing over the wire'''
210 '''used to test argument passing over the wire'''
211 return "%s %s %s %s %s" % (one, two, three, four, five)
211 return "%s %s %s %s %s" % (one, two, three, four, five)
212
212
213 class locallegacypeer(localpeer):
213 class locallegacypeer(localpeer):
214 '''peer extension which implements legacy methods too; used for tests with
214 '''peer extension which implements legacy methods too; used for tests with
215 restricted capabilities'''
215 restricted capabilities'''
216
216
217 def __init__(self, repo):
217 def __init__(self, repo):
218 localpeer.__init__(self, repo, caps=legacycaps)
218 localpeer.__init__(self, repo, caps=legacycaps)
219
219
220 def branches(self, nodes):
220 def branches(self, nodes):
221 return self._repo.branches(nodes)
221 return self._repo.branches(nodes)
222
222
223 def between(self, pairs):
223 def between(self, pairs):
224 return self._repo.between(pairs)
224 return self._repo.between(pairs)
225
225
226 def changegroup(self, basenodes, source):
226 def changegroup(self, basenodes, source):
227 return changegroup.changegroup(self._repo, basenodes, source)
227 return changegroup.changegroup(self._repo, basenodes, source)
228
228
229 def changegroupsubset(self, bases, heads, source):
229 def changegroupsubset(self, bases, heads, source):
230 return changegroup.changegroupsubset(self._repo, bases, heads, source)
230 return changegroup.changegroupsubset(self._repo, bases, heads, source)
231
231
232 class localrepository(object):
232 class localrepository(object):
233
233
234 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
234 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
235 'manifestv2'))
235 'manifestv2'))
236 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
236 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
237 'dotencode'))
237 'dotencode'))
238 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
238 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
239 filtername = None
239 filtername = None
240
240
241 # a list of (ui, featureset) functions.
241 # a list of (ui, featureset) functions.
242 # only functions defined in module of enabled extensions are invoked
242 # only functions defined in module of enabled extensions are invoked
243 featuresetupfuncs = set()
243 featuresetupfuncs = set()
244
244
245 def __init__(self, baseui, path=None, create=False):
245 def __init__(self, baseui, path=None, create=False):
246 self.requirements = set()
246 self.requirements = set()
247 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
247 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
248 self.wopener = self.wvfs
248 self.wopener = self.wvfs
249 self.root = self.wvfs.base
249 self.root = self.wvfs.base
250 self.path = self.wvfs.join(".hg")
250 self.path = self.wvfs.join(".hg")
251 self.origroot = path
251 self.origroot = path
252 self.auditor = pathutil.pathauditor(self.root, self._checknested)
252 self.auditor = pathutil.pathauditor(self.root, self._checknested)
253 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
253 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
254 realfs=False)
254 realfs=False)
255 self.vfs = scmutil.vfs(self.path)
255 self.vfs = scmutil.vfs(self.path)
256 self.opener = self.vfs
256 self.opener = self.vfs
257 self.baseui = baseui
257 self.baseui = baseui
258 self.ui = baseui.copy()
258 self.ui = baseui.copy()
259 self.ui.copy = baseui.copy # prevent copying repo configuration
259 self.ui.copy = baseui.copy # prevent copying repo configuration
260 # A list of callback to shape the phase if no data were found.
260 # A list of callback to shape the phase if no data were found.
261 # Callback are in the form: func(repo, roots) --> processed root.
261 # Callback are in the form: func(repo, roots) --> processed root.
262 # This list it to be filled by extension during repo setup
262 # This list it to be filled by extension during repo setup
263 self._phasedefaults = []
263 self._phasedefaults = []
264 try:
264 try:
265 self.ui.readconfig(self.join("hgrc"), self.root)
265 self.ui.readconfig(self.join("hgrc"), self.root)
266 extensions.loadall(self.ui)
266 extensions.loadall(self.ui)
267 except IOError:
267 except IOError:
268 pass
268 pass
269
269
270 if self.featuresetupfuncs:
270 if self.featuresetupfuncs:
271 self.supported = set(self._basesupported) # use private copy
271 self.supported = set(self._basesupported) # use private copy
272 extmods = set(m.__name__ for n, m
272 extmods = set(m.__name__ for n, m
273 in extensions.extensions(self.ui))
273 in extensions.extensions(self.ui))
274 for setupfunc in self.featuresetupfuncs:
274 for setupfunc in self.featuresetupfuncs:
275 if setupfunc.__module__ in extmods:
275 if setupfunc.__module__ in extmods:
276 setupfunc(self.ui, self.supported)
276 setupfunc(self.ui, self.supported)
277 else:
277 else:
278 self.supported = self._basesupported
278 self.supported = self._basesupported
279
279
280 if not self.vfs.isdir():
280 if not self.vfs.isdir():
281 if create:
281 if create:
282 self.requirements = newreporequirements(self)
282 self.requirements = newreporequirements(self)
283
283
284 if not self.wvfs.exists():
284 if not self.wvfs.exists():
285 self.wvfs.makedirs()
285 self.wvfs.makedirs()
286 self.vfs.makedir(notindexed=True)
286 self.vfs.makedir(notindexed=True)
287
287
288 if 'store' in self.requirements:
288 if 'store' in self.requirements:
289 self.vfs.mkdir("store")
289 self.vfs.mkdir("store")
290
290
291 # create an invalid changelog
291 # create an invalid changelog
292 self.vfs.append(
292 self.vfs.append(
293 "00changelog.i",
293 "00changelog.i",
294 '\0\0\0\2' # represents revlogv2
294 '\0\0\0\2' # represents revlogv2
295 ' dummy changelog to prevent using the old repo layout'
295 ' dummy changelog to prevent using the old repo layout'
296 )
296 )
297 else:
297 else:
298 raise error.RepoError(_("repository %s not found") % path)
298 raise error.RepoError(_("repository %s not found") % path)
299 elif create:
299 elif create:
300 raise error.RepoError(_("repository %s already exists") % path)
300 raise error.RepoError(_("repository %s already exists") % path)
301 else:
301 else:
302 try:
302 try:
303 self.requirements = scmutil.readrequires(
303 self.requirements = scmutil.readrequires(
304 self.vfs, self.supported)
304 self.vfs, self.supported)
305 except IOError as inst:
305 except IOError as inst:
306 if inst.errno != errno.ENOENT:
306 if inst.errno != errno.ENOENT:
307 raise
307 raise
308
308
309 self.sharedpath = self.path
309 self.sharedpath = self.path
310 try:
310 try:
311 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
311 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
312 realpath=True)
312 realpath=True)
313 s = vfs.base
313 s = vfs.base
314 if not vfs.exists():
314 if not vfs.exists():
315 raise error.RepoError(
315 raise error.RepoError(
316 _('.hg/sharedpath points to nonexistent directory %s') % s)
316 _('.hg/sharedpath points to nonexistent directory %s') % s)
317 self.sharedpath = s
317 self.sharedpath = s
318 except IOError as inst:
318 except IOError as inst:
319 if inst.errno != errno.ENOENT:
319 if inst.errno != errno.ENOENT:
320 raise
320 raise
321
321
322 self.store = store.store(
322 self.store = store.store(
323 self.requirements, self.sharedpath, scmutil.vfs)
323 self.requirements, self.sharedpath, scmutil.vfs)
324 self.spath = self.store.path
324 self.spath = self.store.path
325 self.svfs = self.store.vfs
325 self.svfs = self.store.vfs
326 self.sjoin = self.store.join
326 self.sjoin = self.store.join
327 self.vfs.createmode = self.store.createmode
327 self.vfs.createmode = self.store.createmode
328 self._applyopenerreqs()
328 self._applyopenerreqs()
329 if create:
329 if create:
330 self._writerequirements()
330 self._writerequirements()
331
331
332 self._dirstatevalidatewarned = False
332 self._dirstatevalidatewarned = False
333
333
334 self._branchcaches = {}
334 self._branchcaches = {}
335 self._revbranchcache = None
335 self._revbranchcache = None
336 self.filterpats = {}
336 self.filterpats = {}
337 self._datafilters = {}
337 self._datafilters = {}
338 self._transref = self._lockref = self._wlockref = None
338 self._transref = self._lockref = self._wlockref = None
339
339
340 # A cache for various files under .hg/ that tracks file changes,
340 # A cache for various files under .hg/ that tracks file changes,
341 # (used by the filecache decorator)
341 # (used by the filecache decorator)
342 #
342 #
343 # Maps a property name to its util.filecacheentry
343 # Maps a property name to its util.filecacheentry
344 self._filecache = {}
344 self._filecache = {}
345
345
346 # hold sets of revision to be filtered
346 # hold sets of revision to be filtered
347 # should be cleared when something might have changed the filter value:
347 # should be cleared when something might have changed the filter value:
348 # - new changesets,
348 # - new changesets,
349 # - phase change,
349 # - phase change,
350 # - new obsolescence marker,
350 # - new obsolescence marker,
351 # - working directory parent change,
351 # - working directory parent change,
352 # - bookmark changes
352 # - bookmark changes
353 self.filteredrevcache = {}
353 self.filteredrevcache = {}
354
354
355 # generic mapping between names and nodes
355 # generic mapping between names and nodes
356 self.names = namespaces.namespaces()
356 self.names = namespaces.namespaces()
357
357
358 def close(self):
358 def close(self):
359 self._writecaches()
359 self._writecaches()
360
360
361 def _writecaches(self):
361 def _writecaches(self):
362 if self._revbranchcache:
362 if self._revbranchcache:
363 self._revbranchcache.write()
363 self._revbranchcache.write()
364
364
365 def _restrictcapabilities(self, caps):
365 def _restrictcapabilities(self, caps):
366 if self.ui.configbool('experimental', 'bundle2-advertise', True):
366 if self.ui.configbool('experimental', 'bundle2-advertise', True):
367 caps = set(caps)
367 caps = set(caps)
368 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
368 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
369 caps.add('bundle2=' + urllib.quote(capsblob))
369 caps.add('bundle2=' + urllib.quote(capsblob))
370 return caps
370 return caps
371
371
372 def _applyopenerreqs(self):
372 def _applyopenerreqs(self):
373 self.svfs.options = dict((r, 1) for r in self.requirements
373 self.svfs.options = dict((r, 1) for r in self.requirements
374 if r in self.openerreqs)
374 if r in self.openerreqs)
375 # experimental config: format.chunkcachesize
375 # experimental config: format.chunkcachesize
376 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
376 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
377 if chunkcachesize is not None:
377 if chunkcachesize is not None:
378 self.svfs.options['chunkcachesize'] = chunkcachesize
378 self.svfs.options['chunkcachesize'] = chunkcachesize
379 # experimental config: format.maxchainlen
379 # experimental config: format.maxchainlen
380 maxchainlen = self.ui.configint('format', 'maxchainlen')
380 maxchainlen = self.ui.configint('format', 'maxchainlen')
381 if maxchainlen is not None:
381 if maxchainlen is not None:
382 self.svfs.options['maxchainlen'] = maxchainlen
382 self.svfs.options['maxchainlen'] = maxchainlen
383 # experimental config: format.manifestcachesize
383 # experimental config: format.manifestcachesize
384 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
384 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
385 if manifestcachesize is not None:
385 if manifestcachesize is not None:
386 self.svfs.options['manifestcachesize'] = manifestcachesize
386 self.svfs.options['manifestcachesize'] = manifestcachesize
387 # experimental config: format.aggressivemergedeltas
387 # experimental config: format.aggressivemergedeltas
388 aggressivemergedeltas = self.ui.configbool('format',
388 aggressivemergedeltas = self.ui.configbool('format',
389 'aggressivemergedeltas', False)
389 'aggressivemergedeltas', False)
390 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
390 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
391 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
391 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
392
392
393 def _writerequirements(self):
393 def _writerequirements(self):
394 scmutil.writerequires(self.vfs, self.requirements)
394 scmutil.writerequires(self.vfs, self.requirements)
395
395
396 def _checknested(self, path):
396 def _checknested(self, path):
397 """Determine if path is a legal nested repository."""
397 """Determine if path is a legal nested repository."""
398 if not path.startswith(self.root):
398 if not path.startswith(self.root):
399 return False
399 return False
400 subpath = path[len(self.root) + 1:]
400 subpath = path[len(self.root) + 1:]
401 normsubpath = util.pconvert(subpath)
401 normsubpath = util.pconvert(subpath)
402
402
403 # XXX: Checking against the current working copy is wrong in
403 # XXX: Checking against the current working copy is wrong in
404 # the sense that it can reject things like
404 # the sense that it can reject things like
405 #
405 #
406 # $ hg cat -r 10 sub/x.txt
406 # $ hg cat -r 10 sub/x.txt
407 #
407 #
408 # if sub/ is no longer a subrepository in the working copy
408 # if sub/ is no longer a subrepository in the working copy
409 # parent revision.
409 # parent revision.
410 #
410 #
411 # However, it can of course also allow things that would have
411 # However, it can of course also allow things that would have
412 # been rejected before, such as the above cat command if sub/
412 # been rejected before, such as the above cat command if sub/
413 # is a subrepository now, but was a normal directory before.
413 # is a subrepository now, but was a normal directory before.
414 # The old path auditor would have rejected by mistake since it
414 # The old path auditor would have rejected by mistake since it
415 # panics when it sees sub/.hg/.
415 # panics when it sees sub/.hg/.
416 #
416 #
417 # All in all, checking against the working copy seems sensible
417 # All in all, checking against the working copy seems sensible
418 # since we want to prevent access to nested repositories on
418 # since we want to prevent access to nested repositories on
419 # the filesystem *now*.
419 # the filesystem *now*.
420 ctx = self[None]
420 ctx = self[None]
421 parts = util.splitpath(subpath)
421 parts = util.splitpath(subpath)
422 while parts:
422 while parts:
423 prefix = '/'.join(parts)
423 prefix = '/'.join(parts)
424 if prefix in ctx.substate:
424 if prefix in ctx.substate:
425 if prefix == normsubpath:
425 if prefix == normsubpath:
426 return True
426 return True
427 else:
427 else:
428 sub = ctx.sub(prefix)
428 sub = ctx.sub(prefix)
429 return sub.checknested(subpath[len(prefix) + 1:])
429 return sub.checknested(subpath[len(prefix) + 1:])
430 else:
430 else:
431 parts.pop()
431 parts.pop()
432 return False
432 return False
433
433
434 def peer(self):
434 def peer(self):
435 return localpeer(self) # not cached to avoid reference cycle
435 return localpeer(self) # not cached to avoid reference cycle
436
436
437 def unfiltered(self):
437 def unfiltered(self):
438 """Return unfiltered version of the repository
438 """Return unfiltered version of the repository
439
439
440 Intended to be overwritten by filtered repo."""
440 Intended to be overwritten by filtered repo."""
441 return self
441 return self
442
442
443 def filtered(self, name):
443 def filtered(self, name):
444 """Return a filtered version of a repository"""
444 """Return a filtered version of a repository"""
445 # build a new class with the mixin and the current class
445 # build a new class with the mixin and the current class
446 # (possibly subclass of the repo)
446 # (possibly subclass of the repo)
447 class proxycls(repoview.repoview, self.unfiltered().__class__):
447 class proxycls(repoview.repoview, self.unfiltered().__class__):
448 pass
448 pass
449 return proxycls(self, name)
449 return proxycls(self, name)
450
450
451 @repofilecache('bookmarks', 'bookmarks.current')
451 @repofilecache('bookmarks', 'bookmarks.current')
452 def _bookmarks(self):
452 def _bookmarks(self):
453 return bookmarks.bmstore(self)
453 return bookmarks.bmstore(self)
454
454
455 @property
455 @property
456 def _activebookmark(self):
456 def _activebookmark(self):
457 return self._bookmarks.active
457 return self._bookmarks.active
458
458
459 def bookmarkheads(self, bookmark):
459 def bookmarkheads(self, bookmark):
460 name = bookmark.split('@', 1)[0]
460 name = bookmark.split('@', 1)[0]
461 heads = []
461 heads = []
462 for mark, n in self._bookmarks.iteritems():
462 for mark, n in self._bookmarks.iteritems():
463 if mark.split('@', 1)[0] == name:
463 if mark.split('@', 1)[0] == name:
464 heads.append(n)
464 heads.append(n)
465 return heads
465 return heads
466
466
467 # _phaserevs and _phasesets depend on changelog. what we need is to
467 # _phaserevs and _phasesets depend on changelog. what we need is to
468 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
468 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
469 # can't be easily expressed in filecache mechanism.
469 # can't be easily expressed in filecache mechanism.
470 @storecache('phaseroots', '00changelog.i')
470 @storecache('phaseroots', '00changelog.i')
471 def _phasecache(self):
471 def _phasecache(self):
472 return phases.phasecache(self, self._phasedefaults)
472 return phases.phasecache(self, self._phasedefaults)
473
473
474 @storecache('obsstore')
474 @storecache('obsstore')
475 def obsstore(self):
475 def obsstore(self):
476 # read default format for new obsstore.
476 # read default format for new obsstore.
477 # developer config: format.obsstore-version
477 # developer config: format.obsstore-version
478 defaultformat = self.ui.configint('format', 'obsstore-version', None)
478 defaultformat = self.ui.configint('format', 'obsstore-version', None)
479 # rely on obsstore class default when possible.
479 # rely on obsstore class default when possible.
480 kwargs = {}
480 kwargs = {}
481 if defaultformat is not None:
481 if defaultformat is not None:
482 kwargs['defaultformat'] = defaultformat
482 kwargs['defaultformat'] = defaultformat
483 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
483 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
484 store = obsolete.obsstore(self.svfs, readonly=readonly,
484 store = obsolete.obsstore(self.svfs, readonly=readonly,
485 **kwargs)
485 **kwargs)
486 if store and readonly:
486 if store and readonly:
487 self.ui.warn(
487 self.ui.warn(
488 _('obsolete feature not enabled but %i markers found!\n')
488 _('obsolete feature not enabled but %i markers found!\n')
489 % len(list(store)))
489 % len(list(store)))
490 return store
490 return store
491
491
492 @storecache('00changelog.i')
492 @storecache('00changelog.i')
493 def changelog(self):
493 def changelog(self):
494 c = changelog.changelog(self.svfs)
494 c = changelog.changelog(self.svfs)
495 if 'HG_PENDING' in os.environ:
495 if 'HG_PENDING' in os.environ:
496 p = os.environ['HG_PENDING']
496 p = os.environ['HG_PENDING']
497 if p.startswith(self.root):
497 if p.startswith(self.root):
498 c.readpending('00changelog.i.a')
498 c.readpending('00changelog.i.a')
499 return c
499 return c
500
500
501 @storecache('00manifest.i')
501 @storecache('00manifest.i')
502 def manifest(self):
502 def manifest(self):
503 return manifest.manifest(self.svfs)
503 return manifest.manifest(self.svfs)
504
504
505 def dirlog(self, dir):
505 def dirlog(self, dir):
506 return self.manifest.dirlog(dir)
506 return self.manifest.dirlog(dir)
507
507
    @repofilecache('dirstate')
    def dirstate(self):
        # Working-directory state, reloaded when .hg/dirstate changes.
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        """Validate a dirstate parent node.

        Return ``node`` unchanged if it names a known changeset; otherwise
        warn (once per repo object) and fall back to the null revision.
        """
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                # only warn once, even if both parents are unknown
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
523
523
    def __getitem__(self, changeid):
        # None and the magic working-directory revision yield a workingctx.
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # Slicing returns a list of changectxs, skipping filtered revs.
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        # Membership test: anything __getitem__ can resolve is "in" the repo.
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # A repository object is always truthy, even when it has no changesets.
        return True

    def __len__(self):
        # Number of (unfiltered-view) revisions in the changelog.
        return len(self.changelog)

    def __iter__(self):
        # Iterate over revision numbers.
        return iter(self.changelog)
548
548
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Return a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
570
570
    def url(self):
        # Canonical URL for this repository; local repos are file: paths.
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
582
582
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        """Low-level tagging helper.

        Writes one tag entry per name pointing at ``node``: into
        .hg/localtags when ``local`` is true, otherwise into the
        version-controlled .hgtags file followed by a commit. Fires the
        'pretag' hook (which may abort) before writing and the 'tag' hook
        after. Returns the node of the tagging commit, or None for local
        tags.
        """
        if isinstance(names, str):
            # accept a single tag name as a convenience
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # Append tag lines to fp; ``munge`` optionally re-encodes names.
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # record the old value first so the tag history is kept
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            # make sure .hgtags is tracked before committing it
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
656
656
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to commit .hgtags on top of uncommitted edits to it
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
686
686
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by tagslist()/nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
709
709
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # a filtered view cannot reuse the unfiltered cache; recompute
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
725
725
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # 'tip' is implicit and always points at the newest changeset
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
758
758
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local' : a local tag
        'global' : a global tag
        None : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            # build lazily and memoize on the tags cache
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # invert the tag->node mapping once and memoize it
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
790
790
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        # Lazily create the rev->branch cache; shared across filtered views,
        # hence computed on the unfiltered repo.
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
810
810
    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                # caller asked for a silent miss; fall through returning None
                pass

    def lookup(self, key):
        # Resolve any changeset identifier to its binary node id.
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        """Interpret ``key`` as a branch name, preferring ``remote``'s
        branchmap when given; otherwise return the branch of the revision
        ``key`` names."""
        repo = remote or self
        if key in repo.branchmap():
            return key

        # only a local repo can resolve an arbitrary revision to a branch
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
837
837
    def known(self, nodes):
        """Return a list of booleans: whether each node is known here and
        not hidden by the current filter."""
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        # peer API: a local repository returns itself (remotes return None).
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None
871
871
    def join(self, f, *insidef):
        # Join a path (plus optional subpaths) under the .hg directory.
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        # Join a path under the working directory root.
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        """Return the filelog for tracked file ``f``; a leading '/' is
        stripped."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # deprecated since 3.7; kept for API compatibility
        msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
        self.ui.deprecwarn(msg, '3.7')
        return self[changeid].parents()

    def changectx(self, changeid):
        # Thin wrapper kept for API compatibility; same as self[changeid].
        return self[changeid]
891
891
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents to ``p1``/``p2``, fixing up
        dirstate copy records that require manifest access."""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # drop copy records whose source/dest are unknown to p1
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
913
913
    def getcwd(self):
        # Current working directory, relative to the repo root.
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        # Render repo-relative path ``f`` relative to ``cwd`` for display.
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # Open file ``f`` in the working directory.
        return self.wvfs(f, mode)

    def _link(self, f):
        # True if working-directory file ``f`` is a symlink.
        return self.wvfs.islink(f)
925
925
    def _loadfilter(self, filter):
        """Load and cache the configured patterns for the given filter
        section ('encode' or 'decode' here); return a list of
        (matcher, filterfn, params) triples."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # registered in-process filter; rest of cmd is params
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running cmd as an external shell filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
949
949
950 def _filter(self, filterpats, filename, data):
950 def _filter(self, filterpats, filename, data):
951 for mf, fn, cmd in filterpats:
951 for mf, fn, cmd in filterpats:
952 if mf(filename):
952 if mf(filename):
953 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
953 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
954 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
954 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
955 break
955 break
956
956
957 return data
957 return data
958
958
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # [encode] filters, applied when reading from the working directory.
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        # [decode] filters, applied when writing to the working directory.
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        # Register an in-process data filter usable from [encode]/[decode].
        self._datafilters[name] = filter

    def wread(self, filename):
        """Read ``filename`` from the working directory (symlink targets
        are read as data) and apply the encode filters."""
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)
976
976
    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            # 'l' flag: materialize as a symlink pointing at ``data``
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                # 'x' flag: mark the file executable
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        # Apply decode filters only; does not touch the filesystem.
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if non exists"""
        if self._transref:
            # _transref is a weakref; dereference may yield None
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
1004
1004
1005 def transaction(self, desc, report=None):
1005 def transaction(self, desc, report=None):
1006 if (self.ui.configbool('devel', 'all-warnings')
1006 if (self.ui.configbool('devel', 'all-warnings')
1007 or self.ui.configbool('devel', 'check-locks')):
1007 or self.ui.configbool('devel', 'check-locks')):
1008 l = self._lockref and self._lockref()
1008 l = self._lockref and self._lockref()
1009 if l is None or not l.held:
1009 if l is None or not l.held:
1010 self.ui.develwarn('transaction with no lock')
1010 self.ui.develwarn('transaction with no lock')
1011 tr = self.currenttransaction()
1011 tr = self.currenttransaction()
1012 if tr is not None:
1012 if tr is not None:
1013 return tr.nest()
1013 return tr.nest()
1014
1014
1015 # abort here if the journal already exists
1015 # abort here if the journal already exists
1016 if self.svfs.exists("journal"):
1016 if self.svfs.exists("journal"):
1017 raise error.RepoError(
1017 raise error.RepoError(
1018 _("abandoned transaction found"),
1018 _("abandoned transaction found"),
1019 hint=_("run 'hg recover' to clean up transaction"))
1019 hint=_("run 'hg recover' to clean up transaction"))
1020
1020
1021 # make journal.dirstate contain in-memory changes at this point
1021 # make journal.dirstate contain in-memory changes at this point
1022 self.dirstate.write(None)
1022 self.dirstate.write(None)
1023
1023
1024 idbase = "%.40f#%f" % (random.random(), time.time())
1024 idbase = "%.40f#%f" % (random.random(), time.time())
1025 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
1025 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
1026 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1026 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1027
1027
1028 self._writejournal(desc)
1028 self._writejournal(desc)
1029 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1029 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1030 if report:
1030 if report:
1031 rp = report
1031 rp = report
1032 else:
1032 else:
1033 rp = self.ui.warn
1033 rp = self.ui.warn
1034 vfsmap = {'plain': self.vfs} # root of .hg/
1034 vfsmap = {'plain': self.vfs} # root of .hg/
1035 # we must avoid cyclic reference between repo and transaction.
1035 # we must avoid cyclic reference between repo and transaction.
1036 reporef = weakref.ref(self)
1036 reporef = weakref.ref(self)
1037 def validate(tr):
1037 def validate(tr):
1038 """will run pre-closing hooks"""
1038 """will run pre-closing hooks"""
1039 reporef().hook('pretxnclose', throw=True,
1039 reporef().hook('pretxnclose', throw=True,
1040 txnname=desc, **tr.hookargs)
1040 txnname=desc, **tr.hookargs)
1041 def releasefn(tr, success):
1041 def releasefn(tr, success):
1042 repo = reporef()
1042 repo = reporef()
1043 if success:
1043 if success:
1044 # this should be explicitly invoked here, because
1044 # this should be explicitly invoked here, because
1045 # in-memory changes aren't written out at closing
1045 # in-memory changes aren't written out at closing
1046 # transaction, if tr.addfilegenerator (via
1046 # transaction, if tr.addfilegenerator (via
1047 # dirstate.write or so) isn't invoked while
1047 # dirstate.write or so) isn't invoked while
1048 # transaction running
1048 # transaction running
1049 repo.dirstate.write(None)
1049 repo.dirstate.write(None)
1050 else:
1050 else:
1051 # prevent in-memory changes from being written out at
1051 # prevent in-memory changes from being written out at
1052 # the end of outer wlock scope or so
1052 # the end of outer wlock scope or so
1053 repo.dirstate.invalidate()
1053 repo.dirstate.invalidate()
1054
1054
1055 # discard all changes (including ones already written
1055 # discard all changes (including ones already written
1056 # out) in this transaction
1056 # out) in this transaction
1057 repo.vfs.rename('journal.dirstate', 'dirstate')
1057 repo.vfs.rename('journal.dirstate', 'dirstate')
1058
1058
1059 repo.invalidate(clearfilecache=True)
1059 repo.invalidate(clearfilecache=True)
1060
1060
1061 tr = transaction.transaction(rp, self.svfs, vfsmap,
1061 tr = transaction.transaction(rp, self.svfs, vfsmap,
1062 "journal",
1062 "journal",
1063 "undo",
1063 "undo",
1064 aftertrans(renames),
1064 aftertrans(renames),
1065 self.store.createmode,
1065 self.store.createmode,
1066 validator=validate,
1066 validator=validate,
1067 releasefn=releasefn)
1067 releasefn=releasefn)
1068
1068
1069 tr.hookargs['txnid'] = txnid
1069 tr.hookargs['txnid'] = txnid
1070 # note: writing the fncache only during finalize mean that the file is
1070 # note: writing the fncache only during finalize mean that the file is
1071 # outdated when running hooks. As fncache is used for streaming clone,
1071 # outdated when running hooks. As fncache is used for streaming clone,
1072 # this is not expected to break anything that happen during the hooks.
1072 # this is not expected to break anything that happen during the hooks.
1073 tr.addfinalize('flush-fncache', self.store.write)
1073 tr.addfinalize('flush-fncache', self.store.write)
1074 def txnclosehook(tr2):
1074 def txnclosehook(tr2):
1075 """To be run if transaction is successful, will schedule a hook run
1075 """To be run if transaction is successful, will schedule a hook run
1076 """
1076 """
1077 # Don't reference tr2 in hook() so we don't hold a reference.
1077 # Don't reference tr2 in hook() so we don't hold a reference.
1078 # This reduces memory consumption when there are multiple
1078 # This reduces memory consumption when there are multiple
1079 # transactions per lock. This can likely go away if issue5045
1079 # transactions per lock. This can likely go away if issue5045
1080 # fixes the function accumulation.
1080 # fixes the function accumulation.
1081 hookargs = tr2.hookargs
1081 hookargs = tr2.hookargs
1082
1082
1083 def hook():
1083 def hook():
1084 reporef().hook('txnclose', throw=False, txnname=desc,
1084 reporef().hook('txnclose', throw=False, txnname=desc,
1085 **hookargs)
1085 **hookargs)
1086 reporef()._afterlock(hook)
1086 reporef()._afterlock(hook)
1087 tr.addfinalize('txnclose-hook', txnclosehook)
1087 tr.addfinalize('txnclose-hook', txnclosehook)
1088 def txnaborthook(tr2):
1088 def txnaborthook(tr2):
1089 """To be run if transaction is aborted
1089 """To be run if transaction is aborted
1090 """
1090 """
1091 reporef().hook('txnabort', throw=False, txnname=desc,
1091 reporef().hook('txnabort', throw=False, txnname=desc,
1092 **tr2.hookargs)
1092 **tr2.hookargs)
1093 tr.addabort('txnabort-hook', txnaborthook)
1093 tr.addabort('txnabort-hook', txnaborthook)
1094 # avoid eager cache invalidation. in-memory data should be identical
1094 # avoid eager cache invalidation. in-memory data should be identical
1095 # to stored data if transaction has no error.
1095 # to stored data if transaction has no error.
1096 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1096 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1097 self._transref = weakref.ref(tr)
1097 self._transref = weakref.ref(tr)
1098 return tr
1098 return tr
1099
1099
1100 def _journalfiles(self):
1100 def _journalfiles(self):
1101 return ((self.svfs, 'journal'),
1101 return ((self.svfs, 'journal'),
1102 (self.vfs, 'journal.dirstate'),
1102 (self.vfs, 'journal.dirstate'),
1103 (self.vfs, 'journal.branch'),
1103 (self.vfs, 'journal.branch'),
1104 (self.vfs, 'journal.desc'),
1104 (self.vfs, 'journal.desc'),
1105 (self.vfs, 'journal.bookmarks'),
1105 (self.vfs, 'journal.bookmarks'),
1106 (self.svfs, 'journal.phaseroots'))
1106 (self.svfs, 'journal.phaseroots'))
1107
1107
1108 def undofiles(self):
1108 def undofiles(self):
1109 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1109 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1110
1110
1111 def _writejournal(self, desc):
1111 def _writejournal(self, desc):
1112 self.vfs.write("journal.dirstate",
1112 self.vfs.write("journal.dirstate",
1113 self.vfs.tryread("dirstate"))
1113 self.vfs.tryread("dirstate"))
1114 self.vfs.write("journal.branch",
1114 self.vfs.write("journal.branch",
1115 encoding.fromlocal(self.dirstate.branch()))
1115 encoding.fromlocal(self.dirstate.branch()))
1116 self.vfs.write("journal.desc",
1116 self.vfs.write("journal.desc",
1117 "%d\n%s\n" % (len(self), desc))
1117 "%d\n%s\n" % (len(self), desc))
1118 self.vfs.write("journal.bookmarks",
1118 self.vfs.write("journal.bookmarks",
1119 self.vfs.tryread("bookmarks"))
1119 self.vfs.tryread("bookmarks"))
1120 self.svfs.write("journal.phaseroots",
1120 self.svfs.write("journal.phaseroots",
1121 self.svfs.tryread("phaseroots"))
1121 self.svfs.tryread("phaseroots"))
1122
1122
1123 def recover(self):
1123 def recover(self):
1124 with self.lock():
1124 with self.lock():
1125 if self.svfs.exists("journal"):
1125 if self.svfs.exists("journal"):
1126 self.ui.status(_("rolling back interrupted transaction\n"))
1126 self.ui.status(_("rolling back interrupted transaction\n"))
1127 vfsmap = {'': self.svfs,
1127 vfsmap = {'': self.svfs,
1128 'plain': self.vfs,}
1128 'plain': self.vfs,}
1129 transaction.rollback(self.svfs, vfsmap, "journal",
1129 transaction.rollback(self.svfs, vfsmap, "journal",
1130 self.ui.warn)
1130 self.ui.warn)
1131 self.invalidate()
1131 self.invalidate()
1132 return True
1132 return True
1133 else:
1133 else:
1134 self.ui.warn(_("no interrupted transaction available\n"))
1134 self.ui.warn(_("no interrupted transaction available\n"))
1135 return False
1135 return False
1136
1136
1137 def rollback(self, dryrun=False, force=False):
1137 def rollback(self, dryrun=False, force=False):
1138 wlock = lock = dsguard = None
1138 wlock = lock = dsguard = None
1139 try:
1139 try:
1140 wlock = self.wlock()
1140 wlock = self.wlock()
1141 lock = self.lock()
1141 lock = self.lock()
1142 if self.svfs.exists("undo"):
1142 if self.svfs.exists("undo"):
1143 dsguard = cmdutil.dirstateguard(self, 'rollback')
1143 dsguard = cmdutil.dirstateguard(self, 'rollback')
1144
1144
1145 return self._rollback(dryrun, force, dsguard)
1145 return self._rollback(dryrun, force, dsguard)
1146 else:
1146 else:
1147 self.ui.warn(_("no rollback information available\n"))
1147 self.ui.warn(_("no rollback information available\n"))
1148 return 1
1148 return 1
1149 finally:
1149 finally:
1150 release(dsguard, lock, wlock)
1150 release(dsguard, lock, wlock)
1151
1151
1152 @unfilteredmethod # Until we get smarter cache management
1152 @unfilteredmethod # Until we get smarter cache management
1153 def _rollback(self, dryrun, force, dsguard):
1153 def _rollback(self, dryrun, force, dsguard):
1154 ui = self.ui
1154 ui = self.ui
1155 try:
1155 try:
1156 args = self.vfs.read('undo.desc').splitlines()
1156 args = self.vfs.read('undo.desc').splitlines()
1157 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1157 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1158 if len(args) >= 3:
1158 if len(args) >= 3:
1159 detail = args[2]
1159 detail = args[2]
1160 oldtip = oldlen - 1
1160 oldtip = oldlen - 1
1161
1161
1162 if detail and ui.verbose:
1162 if detail and ui.verbose:
1163 msg = (_('repository tip rolled back to revision %s'
1163 msg = (_('repository tip rolled back to revision %s'
1164 ' (undo %s: %s)\n')
1164 ' (undo %s: %s)\n')
1165 % (oldtip, desc, detail))
1165 % (oldtip, desc, detail))
1166 else:
1166 else:
1167 msg = (_('repository tip rolled back to revision %s'
1167 msg = (_('repository tip rolled back to revision %s'
1168 ' (undo %s)\n')
1168 ' (undo %s)\n')
1169 % (oldtip, desc))
1169 % (oldtip, desc))
1170 except IOError:
1170 except IOError:
1171 msg = _('rolling back unknown transaction\n')
1171 msg = _('rolling back unknown transaction\n')
1172 desc = None
1172 desc = None
1173
1173
1174 if not force and self['.'] != self['tip'] and desc == 'commit':
1174 if not force and self['.'] != self['tip'] and desc == 'commit':
1175 raise error.Abort(
1175 raise error.Abort(
1176 _('rollback of last commit while not checked out '
1176 _('rollback of last commit while not checked out '
1177 'may lose data'), hint=_('use -f to force'))
1177 'may lose data'), hint=_('use -f to force'))
1178
1178
1179 ui.status(msg)
1179 ui.status(msg)
1180 if dryrun:
1180 if dryrun:
1181 return 0
1181 return 0
1182
1182
1183 parents = self.dirstate.parents()
1183 parents = self.dirstate.parents()
1184 self.destroying()
1184 self.destroying()
1185 vfsmap = {'plain': self.vfs, '': self.svfs}
1185 vfsmap = {'plain': self.vfs, '': self.svfs}
1186 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1186 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1187 if self.vfs.exists('undo.bookmarks'):
1187 if self.vfs.exists('undo.bookmarks'):
1188 self.vfs.rename('undo.bookmarks', 'bookmarks')
1188 self.vfs.rename('undo.bookmarks', 'bookmarks')
1189 if self.svfs.exists('undo.phaseroots'):
1189 if self.svfs.exists('undo.phaseroots'):
1190 self.svfs.rename('undo.phaseroots', 'phaseroots')
1190 self.svfs.rename('undo.phaseroots', 'phaseroots')
1191 self.invalidate()
1191 self.invalidate()
1192
1192
1193 parentgone = (parents[0] not in self.changelog.nodemap or
1193 parentgone = (parents[0] not in self.changelog.nodemap or
1194 parents[1] not in self.changelog.nodemap)
1194 parents[1] not in self.changelog.nodemap)
1195 if parentgone:
1195 if parentgone:
1196 # prevent dirstateguard from overwriting already restored one
1196 # prevent dirstateguard from overwriting already restored one
1197 dsguard.close()
1197 dsguard.close()
1198
1198
1199 self.vfs.rename('undo.dirstate', 'dirstate')
1199 self.vfs.rename('undo.dirstate', 'dirstate')
1200 try:
1200 try:
1201 branch = self.vfs.read('undo.branch')
1201 branch = self.vfs.read('undo.branch')
1202 self.dirstate.setbranch(encoding.tolocal(branch))
1202 self.dirstate.setbranch(encoding.tolocal(branch))
1203 except IOError:
1203 except IOError:
1204 ui.warn(_('named branch could not be reset: '
1204 ui.warn(_('named branch could not be reset: '
1205 'current branch is still \'%s\'\n')
1205 'current branch is still \'%s\'\n')
1206 % self.dirstate.branch())
1206 % self.dirstate.branch())
1207
1207
1208 self.dirstate.invalidate()
1208 self.dirstate.invalidate()
1209 parents = tuple([p.rev() for p in self[None].parents()])
1209 parents = tuple([p.rev() for p in self[None].parents()])
1210 if len(parents) > 1:
1210 if len(parents) > 1:
1211 ui.status(_('working directory now based on '
1211 ui.status(_('working directory now based on '
1212 'revisions %d and %d\n') % parents)
1212 'revisions %d and %d\n') % parents)
1213 else:
1213 else:
1214 ui.status(_('working directory now based on '
1214 ui.status(_('working directory now based on '
1215 'revision %d\n') % parents)
1215 'revision %d\n') % parents)
1216 mergemod.mergestate.clean(self, self['.'].node())
1216 mergemod.mergestate.clean(self, self['.'].node())
1217
1217
1218 # TODO: if we know which new heads may result from this rollback, pass
1218 # TODO: if we know which new heads may result from this rollback, pass
1219 # them to destroy(), which will prevent the branchhead cache from being
1219 # them to destroy(), which will prevent the branchhead cache from being
1220 # invalidated.
1220 # invalidated.
1221 self.destroyed()
1221 self.destroyed()
1222 return 0
1222 return 0
1223
1223
1224 def invalidatecaches(self):
1224 def invalidatecaches(self):
1225
1225
1226 if '_tagscache' in vars(self):
1226 if '_tagscache' in vars(self):
1227 # can't use delattr on proxy
1227 # can't use delattr on proxy
1228 del self.__dict__['_tagscache']
1228 del self.__dict__['_tagscache']
1229
1229
1230 self.unfiltered()._branchcaches.clear()
1230 self.unfiltered()._branchcaches.clear()
1231 self.invalidatevolatilesets()
1231 self.invalidatevolatilesets()
1232
1232
1233 def invalidatevolatilesets(self):
1233 def invalidatevolatilesets(self):
1234 self.filteredrevcache.clear()
1234 self.filteredrevcache.clear()
1235 obsolete.clearobscaches(self)
1235 obsolete.clearobscaches(self)
1236
1236
1237 def invalidatedirstate(self):
1237 def invalidatedirstate(self):
1238 '''Invalidates the dirstate, causing the next call to dirstate
1238 '''Invalidates the dirstate, causing the next call to dirstate
1239 to check if it was modified since the last time it was read,
1239 to check if it was modified since the last time it was read,
1240 rereading it if it has.
1240 rereading it if it has.
1241
1241
1242 This is different to dirstate.invalidate() that it doesn't always
1242 This is different to dirstate.invalidate() that it doesn't always
1243 rereads the dirstate. Use dirstate.invalidate() if you want to
1243 rereads the dirstate. Use dirstate.invalidate() if you want to
1244 explicitly read the dirstate again (i.e. restoring it to a previous
1244 explicitly read the dirstate again (i.e. restoring it to a previous
1245 known good state).'''
1245 known good state).'''
1246 if hasunfilteredcache(self, 'dirstate'):
1246 if hasunfilteredcache(self, 'dirstate'):
1247 for k in self.dirstate._filecache:
1247 for k in self.dirstate._filecache:
1248 try:
1248 try:
1249 delattr(self.dirstate, k)
1249 delattr(self.dirstate, k)
1250 except AttributeError:
1250 except AttributeError:
1251 pass
1251 pass
1252 delattr(self.unfiltered(), 'dirstate')
1252 delattr(self.unfiltered(), 'dirstate')
1253
1253
1254 def invalidate(self, clearfilecache=False):
1254 def invalidate(self, clearfilecache=False):
1255 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1255 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1256 for k in self._filecache.keys():
1256 for k in self._filecache.keys():
1257 # dirstate is invalidated separately in invalidatedirstate()
1257 # dirstate is invalidated separately in invalidatedirstate()
1258 if k == 'dirstate':
1258 if k == 'dirstate':
1259 continue
1259 continue
1260
1260
1261 if clearfilecache:
1261 if clearfilecache:
1262 del self._filecache[k]
1262 del self._filecache[k]
1263 try:
1263 try:
1264 delattr(unfiltered, k)
1264 delattr(unfiltered, k)
1265 except AttributeError:
1265 except AttributeError:
1266 pass
1266 pass
1267 self.invalidatecaches()
1267 self.invalidatecaches()
1268 self.store.invalidatecaches()
1268 self.store.invalidatecaches()
1269
1269
1270 def invalidateall(self):
1270 def invalidateall(self):
1271 '''Fully invalidates both store and non-store parts, causing the
1271 '''Fully invalidates both store and non-store parts, causing the
1272 subsequent operation to reread any outside changes.'''
1272 subsequent operation to reread any outside changes.'''
1273 # extension should hook this to invalidate its caches
1273 # extension should hook this to invalidate its caches
1274 self.invalidate()
1274 self.invalidate()
1275 self.invalidatedirstate()
1275 self.invalidatedirstate()
1276
1276
1277 def _refreshfilecachestats(self, tr):
1277 def _refreshfilecachestats(self, tr):
1278 """Reload stats of cached files so that they are flagged as valid"""
1278 """Reload stats of cached files so that they are flagged as valid"""
1279 for k, ce in self._filecache.items():
1279 for k, ce in self._filecache.items():
1280 if k == 'dirstate' or k not in self.__dict__:
1280 if k == 'dirstate' or k not in self.__dict__:
1281 continue
1281 continue
1282 ce.refresh()
1282 ce.refresh()
1283
1283
1284 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1284 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1285 inheritchecker=None, parentenvvar=None):
1285 inheritchecker=None, parentenvvar=None):
1286 parentlock = None
1286 parentlock = None
1287 # the contents of parentenvvar are used by the underlying lock to
1287 # the contents of parentenvvar are used by the underlying lock to
1288 # determine whether it can be inherited
1288 # determine whether it can be inherited
1289 if parentenvvar is not None:
1289 if parentenvvar is not None:
1290 parentlock = os.environ.get(parentenvvar)
1290 parentlock = os.environ.get(parentenvvar)
1291 try:
1291 try:
1292 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1292 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1293 acquirefn=acquirefn, desc=desc,
1293 acquirefn=acquirefn, desc=desc,
1294 inheritchecker=inheritchecker,
1294 inheritchecker=inheritchecker,
1295 parentlock=parentlock)
1295 parentlock=parentlock)
1296 except error.LockHeld as inst:
1296 except error.LockHeld as inst:
1297 if not wait:
1297 if not wait:
1298 raise
1298 raise
1299 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1299 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1300 (desc, inst.locker))
1300 (desc, inst.locker))
1301 # default to 600 seconds timeout
1301 # default to 600 seconds timeout
1302 l = lockmod.lock(vfs, lockname,
1302 l = lockmod.lock(vfs, lockname,
1303 int(self.ui.config("ui", "timeout", "600")),
1303 int(self.ui.config("ui", "timeout", "600")),
1304 releasefn=releasefn, acquirefn=acquirefn,
1304 releasefn=releasefn, acquirefn=acquirefn,
1305 desc=desc)
1305 desc=desc)
1306 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1306 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1307 return l
1307 return l
1308
1308
1309 def _afterlock(self, callback):
1309 def _afterlock(self, callback):
1310 """add a callback to be run when the repository is fully unlocked
1310 """add a callback to be run when the repository is fully unlocked
1311
1311
1312 The callback will be executed when the outermost lock is released
1312 The callback will be executed when the outermost lock is released
1313 (with wlock being higher level than 'lock')."""
1313 (with wlock being higher level than 'lock')."""
1314 for ref in (self._wlockref, self._lockref):
1314 for ref in (self._wlockref, self._lockref):
1315 l = ref and ref()
1315 l = ref and ref()
1316 if l and l.held:
1316 if l and l.held:
1317 l.postrelease.append(callback)
1317 l.postrelease.append(callback)
1318 break
1318 break
1319 else: # no lock have been found.
1319 else: # no lock have been found.
1320 callback()
1320 callback()
1321
1321
1322 def lock(self, wait=True):
1322 def lock(self, wait=True):
1323 '''Lock the repository store (.hg/store) and return a weak reference
1323 '''Lock the repository store (.hg/store) and return a weak reference
1324 to the lock. Use this before modifying the store (e.g. committing or
1324 to the lock. Use this before modifying the store (e.g. committing or
1325 stripping). If you are opening a transaction, get a lock as well.)
1325 stripping). If you are opening a transaction, get a lock as well.)
1326
1326
1327 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1327 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1328 'wlock' first to avoid a dead-lock hazard.'''
1328 'wlock' first to avoid a dead-lock hazard.'''
1329 l = self._lockref and self._lockref()
1329 l = self._lockref and self._lockref()
1330 if l is not None and l.held:
1330 if l is not None and l.held:
1331 l.lock()
1331 l.lock()
1332 return l
1332 return l
1333
1333
1334 l = self._lock(self.svfs, "lock", wait, None,
1334 l = self._lock(self.svfs, "lock", wait, None,
1335 self.invalidate, _('repository %s') % self.origroot)
1335 self.invalidate, _('repository %s') % self.origroot)
1336 self._lockref = weakref.ref(l)
1336 self._lockref = weakref.ref(l)
1337 return l
1337 return l
1338
1338
1339 def _wlockchecktransaction(self):
1339 def _wlockchecktransaction(self):
1340 if self.currenttransaction() is not None:
1340 if self.currenttransaction() is not None:
1341 raise error.LockInheritanceContractViolation(
1341 raise error.LockInheritanceContractViolation(
1342 'wlock cannot be inherited in the middle of a transaction')
1342 'wlock cannot be inherited in the middle of a transaction')
1343
1343
1344 def wlock(self, wait=True):
1344 def wlock(self, wait=True):
1345 '''Lock the non-store parts of the repository (everything under
1345 '''Lock the non-store parts of the repository (everything under
1346 .hg except .hg/store) and return a weak reference to the lock.
1346 .hg except .hg/store) and return a weak reference to the lock.
1347
1347
1348 Use this before modifying files in .hg.
1348 Use this before modifying files in .hg.
1349
1349
1350 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1350 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1351 'wlock' first to avoid a dead-lock hazard.'''
1351 'wlock' first to avoid a dead-lock hazard.'''
1352 l = self._wlockref and self._wlockref()
1352 l = self._wlockref and self._wlockref()
1353 if l is not None and l.held:
1353 if l is not None and l.held:
1354 l.lock()
1354 l.lock()
1355 return l
1355 return l
1356
1356
1357 # We do not need to check for non-waiting lock acquisition. Such
1357 # We do not need to check for non-waiting lock acquisition. Such
1358 # acquisition would not cause dead-lock as they would just fail.
1358 # acquisition would not cause dead-lock as they would just fail.
1359 if wait and (self.ui.configbool('devel', 'all-warnings')
1359 if wait and (self.ui.configbool('devel', 'all-warnings')
1360 or self.ui.configbool('devel', 'check-locks')):
1360 or self.ui.configbool('devel', 'check-locks')):
1361 l = self._lockref and self._lockref()
1361 l = self._lockref and self._lockref()
1362 if l is not None and l.held:
1362 if l is not None and l.held:
1363 self.ui.develwarn('"wlock" acquired after "lock"')
1363 self.ui.develwarn('"wlock" acquired after "lock"')
1364
1364
1365 def unlock():
1365 def unlock():
1366 if self.dirstate.pendingparentchange():
1366 if self.dirstate.pendingparentchange():
1367 self.dirstate.invalidate()
1367 self.dirstate.invalidate()
1368 else:
1368 else:
1369 self.dirstate.write(None)
1369 self.dirstate.write(None)
1370
1370
1371 self._filecache['dirstate'].refresh()
1371 self._filecache['dirstate'].refresh()
1372
1372
1373 l = self._lock(self.vfs, "wlock", wait, unlock,
1373 l = self._lock(self.vfs, "wlock", wait, unlock,
1374 self.invalidatedirstate, _('working directory of %s') %
1374 self.invalidatedirstate, _('working directory of %s') %
1375 self.origroot,
1375 self.origroot,
1376 inheritchecker=self._wlockchecktransaction,
1376 inheritchecker=self._wlockchecktransaction,
1377 parentenvvar='HG_WLOCK_LOCKER')
1377 parentenvvar='HG_WLOCK_LOCKER')
1378 self._wlockref = weakref.ref(l)
1378 self._wlockref = weakref.ref(l)
1379 return l
1379 return l
1380
1380
1381 def _currentlock(self, lockref):
1381 def _currentlock(self, lockref):
1382 """Returns the lock if it's held, or None if it's not."""
1382 """Returns the lock if it's held, or None if it's not."""
1383 if lockref is None:
1383 if lockref is None:
1384 return None
1384 return None
1385 l = lockref()
1385 l = lockref()
1386 if l is None or not l.held:
1386 if l is None or not l.held:
1387 return None
1387 return None
1388 return l
1388 return l
1389
1389
1390 def currentwlock(self):
1390 def currentwlock(self):
1391 """Returns the wlock if it's held, or None if it's not."""
1391 """Returns the wlock if it's held, or None if it's not."""
1392 return self._currentlock(self._wlockref)
1392 return self._currentlock(self._wlockref)
1393
1393
1394 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1394 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1395 """
1395 """
1396 commit an individual file as part of a larger transaction
1396 commit an individual file as part of a larger transaction
1397 """
1397 """
1398
1398
1399 fname = fctx.path()
1399 fname = fctx.path()
1400 fparent1 = manifest1.get(fname, nullid)
1400 fparent1 = manifest1.get(fname, nullid)
1401 fparent2 = manifest2.get(fname, nullid)
1401 fparent2 = manifest2.get(fname, nullid)
1402 if isinstance(fctx, context.filectx):
1402 if isinstance(fctx, context.filectx):
1403 node = fctx.filenode()
1403 node = fctx.filenode()
1404 if node in [fparent1, fparent2]:
1404 if node in [fparent1, fparent2]:
1405 self.ui.debug('reusing %s filelog entry\n' % fname)
1405 self.ui.debug('reusing %s filelog entry\n' % fname)
1406 return node
1406 return node
1407
1407
1408 flog = self.file(fname)
1408 flog = self.file(fname)
1409 meta = {}
1409 meta = {}
1410 copy = fctx.renamed()
1410 copy = fctx.renamed()
1411 if copy and copy[0] != fname:
1411 if copy and copy[0] != fname:
1412 # Mark the new revision of this file as a copy of another
1412 # Mark the new revision of this file as a copy of another
1413 # file. This copy data will effectively act as a parent
1413 # file. This copy data will effectively act as a parent
1414 # of this new revision. If this is a merge, the first
1414 # of this new revision. If this is a merge, the first
1415 # parent will be the nullid (meaning "look up the copy data")
1415 # parent will be the nullid (meaning "look up the copy data")
1416 # and the second one will be the other parent. For example:
1416 # and the second one will be the other parent. For example:
1417 #
1417 #
1418 # 0 --- 1 --- 3 rev1 changes file foo
1418 # 0 --- 1 --- 3 rev1 changes file foo
1419 # \ / rev2 renames foo to bar and changes it
1419 # \ / rev2 renames foo to bar and changes it
1420 # \- 2 -/ rev3 should have bar with all changes and
1420 # \- 2 -/ rev3 should have bar with all changes and
1421 # should record that bar descends from
1421 # should record that bar descends from
1422 # bar in rev2 and foo in rev1
1422 # bar in rev2 and foo in rev1
1423 #
1423 #
1424 # this allows this merge to succeed:
1424 # this allows this merge to succeed:
1425 #
1425 #
1426 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1426 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1427 # \ / merging rev3 and rev4 should use bar@rev2
1427 # \ / merging rev3 and rev4 should use bar@rev2
1428 # \- 2 --- 4 as the merge base
1428 # \- 2 --- 4 as the merge base
1429 #
1429 #
1430
1430
1431 cfname = copy[0]
1431 cfname = copy[0]
1432 crev = manifest1.get(cfname)
1432 crev = manifest1.get(cfname)
1433 newfparent = fparent2
1433 newfparent = fparent2
1434
1434
1435 if manifest2: # branch merge
1435 if manifest2: # branch merge
1436 if fparent2 == nullid or crev is None: # copied on remote side
1436 if fparent2 == nullid or crev is None: # copied on remote side
1437 if cfname in manifest2:
1437 if cfname in manifest2:
1438 crev = manifest2[cfname]
1438 crev = manifest2[cfname]
1439 newfparent = fparent1
1439 newfparent = fparent1
1440
1440
1441 # Here, we used to search backwards through history to try to find
1441 # Here, we used to search backwards through history to try to find
1442 # where the file copy came from if the source of a copy was not in
1442 # where the file copy came from if the source of a copy was not in
1443 # the parent directory. However, this doesn't actually make sense to
1443 # the parent directory. However, this doesn't actually make sense to
1444 # do (what does a copy from something not in your working copy even
1444 # do (what does a copy from something not in your working copy even
1445 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1445 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1446 # the user that copy information was dropped, so if they didn't
1446 # the user that copy information was dropped, so if they didn't
1447 # expect this outcome it can be fixed, but this is the correct
1447 # expect this outcome it can be fixed, but this is the correct
1448 # behavior in this circumstance.
1448 # behavior in this circumstance.
1449
1449
1450 if crev:
1450 if crev:
1451 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1451 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1452 meta["copy"] = cfname
1452 meta["copy"] = cfname
1453 meta["copyrev"] = hex(crev)
1453 meta["copyrev"] = hex(crev)
1454 fparent1, fparent2 = nullid, newfparent
1454 fparent1, fparent2 = nullid, newfparent
1455 else:
1455 else:
1456 self.ui.warn(_("warning: can't find ancestor for '%s' "
1456 self.ui.warn(_("warning: can't find ancestor for '%s' "
1457 "copied from '%s'!\n") % (fname, cfname))
1457 "copied from '%s'!\n") % (fname, cfname))
1458
1458
1459 elif fparent1 == nullid:
1459 elif fparent1 == nullid:
1460 fparent1, fparent2 = fparent2, nullid
1460 fparent1, fparent2 = fparent2, nullid
1461 elif fparent2 != nullid:
1461 elif fparent2 != nullid:
1462 # is one parent an ancestor of the other?
1462 # is one parent an ancestor of the other?
1463 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1463 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1464 if fparent1 in fparentancestors:
1464 if fparent1 in fparentancestors:
1465 fparent1, fparent2 = fparent2, nullid
1465 fparent1, fparent2 = fparent2, nullid
1466 elif fparent2 in fparentancestors:
1466 elif fparent2 in fparentancestors:
1467 fparent2 = nullid
1467 fparent2 = nullid
1468
1468
1469 # is the file changed?
1469 # is the file changed?
1470 text = fctx.data()
1470 text = fctx.data()
1471 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1471 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1472 changelist.append(fname)
1472 changelist.append(fname)
1473 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1473 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1474 # are just the flags changed during merge?
1474 # are just the flags changed during merge?
1475 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1475 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1476 changelist.append(fname)
1476 changelist.append(fname)
1477
1477
1478 return fparent1
1478 return fparent1
1479
1479
1480 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1480 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1481 """check for commit arguments that aren't commitable"""
1481 """check for commit arguments that aren't commitable"""
1482 force = False
1482 if match.isexact() or match.prefix():
1483 if not force and (match.isexact() or match.prefix()):
1484 matched = set(status.modified + status.added + status.removed)
1483 matched = set(status.modified + status.added + status.removed)
1485
1484
1486 for f in match.files():
1485 for f in match.files():
1487 f = self.dirstate.normalize(f)
1486 f = self.dirstate.normalize(f)
1488 if f == '.' or f in matched or f in wctx.substate:
1487 if f == '.' or f in matched or f in wctx.substate:
1489 continue
1488 continue
1490 if f in status.deleted:
1489 if f in status.deleted:
1491 fail(f, _('file not found!'))
1490 fail(f, _('file not found!'))
1492 if f in vdirs: # visited directory
1491 if f in vdirs: # visited directory
1493 d = f + '/'
1492 d = f + '/'
1494 for mf in matched:
1493 for mf in matched:
1495 if mf.startswith(d):
1494 if mf.startswith(d):
1496 break
1495 break
1497 else:
1496 else:
1498 fail(f, _("no match under directory!"))
1497 fail(f, _("no match under directory!"))
1499 elif f not in self.dirstate:
1498 elif f not in self.dirstate:
1500 fail(f, _("file not tracked!"))
1499 fail(f, _("file not tracked!"))
1501
1500
1502 @unfilteredmethod
1501 @unfilteredmethod
1503 def commit(self, text="", user=None, date=None, match=None, force=False,
1502 def commit(self, text="", user=None, date=None, match=None, force=False,
1504 editor=False, extra=None):
1503 editor=False, extra=None):
1505 """Add a new revision to current repository.
1504 """Add a new revision to current repository.
1506
1505
1507 Revision information is gathered from the working directory,
1506 Revision information is gathered from the working directory,
1508 match can be used to filter the committed files. If editor is
1507 match can be used to filter the committed files. If editor is
1509 supplied, it is called to get a commit message.
1508 supplied, it is called to get a commit message.
1510 """
1509 """
1511 if extra is None:
1510 if extra is None:
1512 extra = {}
1511 extra = {}
1513
1512
1514 def fail(f, msg):
1513 def fail(f, msg):
1515 raise error.Abort('%s: %s' % (f, msg))
1514 raise error.Abort('%s: %s' % (f, msg))
1516
1515
1517 if not match:
1516 if not match:
1518 match = matchmod.always(self.root, '')
1517 match = matchmod.always(self.root, '')
1519
1518
1520 if not force:
1519 if not force:
1521 vdirs = []
1520 vdirs = []
1522 match.explicitdir = vdirs.append
1521 match.explicitdir = vdirs.append
1523 match.bad = fail
1522 match.bad = fail
1524
1523
1525 wlock = lock = tr = None
1524 wlock = lock = tr = None
1526 try:
1525 try:
1527 wlock = self.wlock()
1526 wlock = self.wlock()
1528 lock = self.lock() # for recent changelog (see issue4368)
1527 lock = self.lock() # for recent changelog (see issue4368)
1529
1528
1530 wctx = self[None]
1529 wctx = self[None]
1531 merge = len(wctx.parents()) > 1
1530 merge = len(wctx.parents()) > 1
1532
1531
1533 if not force and merge and match.ispartial():
1532 if not force and merge and match.ispartial():
1534 raise error.Abort(_('cannot partially commit a merge '
1533 raise error.Abort(_('cannot partially commit a merge '
1535 '(do not specify files or patterns)'))
1534 '(do not specify files or patterns)'))
1536
1535
1537 status = self.status(match=match, clean=force)
1536 status = self.status(match=match, clean=force)
1538 if force:
1537 if force:
1539 status.modified.extend(status.clean) # mq may commit clean files
1538 status.modified.extend(status.clean) # mq may commit clean files
1540
1539
1541 # check subrepos
1540 # check subrepos
1542 subs = []
1541 subs = []
1543 commitsubs = set()
1542 commitsubs = set()
1544 newstate = wctx.substate.copy()
1543 newstate = wctx.substate.copy()
1545 # only manage subrepos and .hgsubstate if .hgsub is present
1544 # only manage subrepos and .hgsubstate if .hgsub is present
1546 if '.hgsub' in wctx:
1545 if '.hgsub' in wctx:
1547 # we'll decide whether to track this ourselves, thanks
1546 # we'll decide whether to track this ourselves, thanks
1548 for c in status.modified, status.added, status.removed:
1547 for c in status.modified, status.added, status.removed:
1549 if '.hgsubstate' in c:
1548 if '.hgsubstate' in c:
1550 c.remove('.hgsubstate')
1549 c.remove('.hgsubstate')
1551
1550
1552 # compare current state to last committed state
1551 # compare current state to last committed state
1553 # build new substate based on last committed state
1552 # build new substate based on last committed state
1554 oldstate = wctx.p1().substate
1553 oldstate = wctx.p1().substate
1555 for s in sorted(newstate.keys()):
1554 for s in sorted(newstate.keys()):
1556 if not match(s):
1555 if not match(s):
1557 # ignore working copy, use old state if present
1556 # ignore working copy, use old state if present
1558 if s in oldstate:
1557 if s in oldstate:
1559 newstate[s] = oldstate[s]
1558 newstate[s] = oldstate[s]
1560 continue
1559 continue
1561 if not force:
1560 if not force:
1562 raise error.Abort(
1561 raise error.Abort(
1563 _("commit with new subrepo %s excluded") % s)
1562 _("commit with new subrepo %s excluded") % s)
1564 dirtyreason = wctx.sub(s).dirtyreason(True)
1563 dirtyreason = wctx.sub(s).dirtyreason(True)
1565 if dirtyreason:
1564 if dirtyreason:
1566 if not self.ui.configbool('ui', 'commitsubrepos'):
1565 if not self.ui.configbool('ui', 'commitsubrepos'):
1567 raise error.Abort(dirtyreason,
1566 raise error.Abort(dirtyreason,
1568 hint=_("use --subrepos for recursive commit"))
1567 hint=_("use --subrepos for recursive commit"))
1569 subs.append(s)
1568 subs.append(s)
1570 commitsubs.add(s)
1569 commitsubs.add(s)
1571 else:
1570 else:
1572 bs = wctx.sub(s).basestate()
1571 bs = wctx.sub(s).basestate()
1573 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1572 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1574 if oldstate.get(s, (None, None, None))[1] != bs:
1573 if oldstate.get(s, (None, None, None))[1] != bs:
1575 subs.append(s)
1574 subs.append(s)
1576
1575
1577 # check for removed subrepos
1576 # check for removed subrepos
1578 for p in wctx.parents():
1577 for p in wctx.parents():
1579 r = [s for s in p.substate if s not in newstate]
1578 r = [s for s in p.substate if s not in newstate]
1580 subs += [s for s in r if match(s)]
1579 subs += [s for s in r if match(s)]
1581 if subs:
1580 if subs:
1582 if (not match('.hgsub') and
1581 if (not match('.hgsub') and
1583 '.hgsub' in (wctx.modified() + wctx.added())):
1582 '.hgsub' in (wctx.modified() + wctx.added())):
1584 raise error.Abort(
1583 raise error.Abort(
1585 _("can't commit subrepos without .hgsub"))
1584 _("can't commit subrepos without .hgsub"))
1586 status.modified.insert(0, '.hgsubstate')
1585 status.modified.insert(0, '.hgsubstate')
1587
1586
1588 elif '.hgsub' in status.removed:
1587 elif '.hgsub' in status.removed:
1589 # clean up .hgsubstate when .hgsub is removed
1588 # clean up .hgsubstate when .hgsub is removed
1590 if ('.hgsubstate' in wctx and
1589 if ('.hgsubstate' in wctx and
1591 '.hgsubstate' not in (status.modified + status.added +
1590 '.hgsubstate' not in (status.modified + status.added +
1592 status.removed)):
1591 status.removed)):
1593 status.removed.insert(0, '.hgsubstate')
1592 status.removed.insert(0, '.hgsubstate')
1594
1593
1595 # make sure all explicit patterns are matched
1594 # make sure all explicit patterns are matched
1596 if not force:
1595 if not force:
1597 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1596 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1598
1597
1599 cctx = context.workingcommitctx(self, status,
1598 cctx = context.workingcommitctx(self, status,
1600 text, user, date, extra)
1599 text, user, date, extra)
1601
1600
1602 # internal config: ui.allowemptycommit
1601 # internal config: ui.allowemptycommit
1603 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1602 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1604 or extra.get('close') or merge or cctx.files()
1603 or extra.get('close') or merge or cctx.files()
1605 or self.ui.configbool('ui', 'allowemptycommit'))
1604 or self.ui.configbool('ui', 'allowemptycommit'))
1606 if not allowemptycommit:
1605 if not allowemptycommit:
1607 return None
1606 return None
1608
1607
1609 if merge and cctx.deleted():
1608 if merge and cctx.deleted():
1610 raise error.Abort(_("cannot commit merge with missing files"))
1609 raise error.Abort(_("cannot commit merge with missing files"))
1611
1610
1612 ms = mergemod.mergestate.read(self)
1611 ms = mergemod.mergestate.read(self)
1613
1612
1614 if list(ms.unresolved()):
1613 if list(ms.unresolved()):
1615 raise error.Abort(_('unresolved merge conflicts '
1614 raise error.Abort(_('unresolved merge conflicts '
1616 '(see "hg help resolve")'))
1615 '(see "hg help resolve")'))
1617 if ms.mdstate() != 's' or list(ms.driverresolved()):
1616 if ms.mdstate() != 's' or list(ms.driverresolved()):
1618 raise error.Abort(_('driver-resolved merge conflicts'),
1617 raise error.Abort(_('driver-resolved merge conflicts'),
1619 hint=_('run "hg resolve --all" to resolve'))
1618 hint=_('run "hg resolve --all" to resolve'))
1620
1619
1621 if editor:
1620 if editor:
1622 cctx._text = editor(self, cctx, subs)
1621 cctx._text = editor(self, cctx, subs)
1623 edited = (text != cctx._text)
1622 edited = (text != cctx._text)
1624
1623
1625 # Save commit message in case this transaction gets rolled back
1624 # Save commit message in case this transaction gets rolled back
1626 # (e.g. by a pretxncommit hook). Leave the content alone on
1625 # (e.g. by a pretxncommit hook). Leave the content alone on
1627 # the assumption that the user will use the same editor again.
1626 # the assumption that the user will use the same editor again.
1628 msgfn = self.savecommitmessage(cctx._text)
1627 msgfn = self.savecommitmessage(cctx._text)
1629
1628
1630 # commit subs and write new state
1629 # commit subs and write new state
1631 if subs:
1630 if subs:
1632 for s in sorted(commitsubs):
1631 for s in sorted(commitsubs):
1633 sub = wctx.sub(s)
1632 sub = wctx.sub(s)
1634 self.ui.status(_('committing subrepository %s\n') %
1633 self.ui.status(_('committing subrepository %s\n') %
1635 subrepo.subrelpath(sub))
1634 subrepo.subrelpath(sub))
1636 sr = sub.commit(cctx._text, user, date)
1635 sr = sub.commit(cctx._text, user, date)
1637 newstate[s] = (newstate[s][0], sr)
1636 newstate[s] = (newstate[s][0], sr)
1638 subrepo.writestate(self, newstate)
1637 subrepo.writestate(self, newstate)
1639
1638
1640 p1, p2 = self.dirstate.parents()
1639 p1, p2 = self.dirstate.parents()
1641 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1640 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1642 try:
1641 try:
1643 self.hook("precommit", throw=True, parent1=hookp1,
1642 self.hook("precommit", throw=True, parent1=hookp1,
1644 parent2=hookp2)
1643 parent2=hookp2)
1645 tr = self.transaction('commit')
1644 tr = self.transaction('commit')
1646 ret = self.commitctx(cctx, True)
1645 ret = self.commitctx(cctx, True)
1647 except: # re-raises
1646 except: # re-raises
1648 if edited:
1647 if edited:
1649 self.ui.write(
1648 self.ui.write(
1650 _('note: commit message saved in %s\n') % msgfn)
1649 _('note: commit message saved in %s\n') % msgfn)
1651 raise
1650 raise
1652 # update bookmarks, dirstate and mergestate
1651 # update bookmarks, dirstate and mergestate
1653 bookmarks.update(self, [p1, p2], ret)
1652 bookmarks.update(self, [p1, p2], ret)
1654 cctx.markcommitted(ret)
1653 cctx.markcommitted(ret)
1655 ms.reset()
1654 ms.reset()
1656 tr.close()
1655 tr.close()
1657
1656
1658 finally:
1657 finally:
1659 lockmod.release(tr, lock, wlock)
1658 lockmod.release(tr, lock, wlock)
1660
1659
1661 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1660 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1662 # hack for command that use a temporary commit (eg: histedit)
1661 # hack for command that use a temporary commit (eg: histedit)
1663 # temporary commit got stripped before hook release
1662 # temporary commit got stripped before hook release
1664 if self.changelog.hasnode(ret):
1663 if self.changelog.hasnode(ret):
1665 self.hook("commit", node=node, parent1=parent1,
1664 self.hook("commit", node=node, parent1=parent1,
1666 parent2=parent2)
1665 parent2=parent2)
1667 self._afterlock(commithook)
1666 self._afterlock(commithook)
1668 return ret
1667 return ret
1669
1668
1670 @unfilteredmethod
1669 @unfilteredmethod
1671 def commitctx(self, ctx, error=False):
1670 def commitctx(self, ctx, error=False):
1672 """Add a new revision to current repository.
1671 """Add a new revision to current repository.
1673 Revision information is passed via the context argument.
1672 Revision information is passed via the context argument.
1674 """
1673 """
1675
1674
1676 tr = None
1675 tr = None
1677 p1, p2 = ctx.p1(), ctx.p2()
1676 p1, p2 = ctx.p1(), ctx.p2()
1678 user = ctx.user()
1677 user = ctx.user()
1679
1678
1680 lock = self.lock()
1679 lock = self.lock()
1681 try:
1680 try:
1682 tr = self.transaction("commit")
1681 tr = self.transaction("commit")
1683 trp = weakref.proxy(tr)
1682 trp = weakref.proxy(tr)
1684
1683
1685 if ctx.files():
1684 if ctx.files():
1686 m1 = p1.manifest()
1685 m1 = p1.manifest()
1687 m2 = p2.manifest()
1686 m2 = p2.manifest()
1688 m = m1.copy()
1687 m = m1.copy()
1689
1688
1690 # check in files
1689 # check in files
1691 added = []
1690 added = []
1692 changed = []
1691 changed = []
1693 removed = list(ctx.removed())
1692 removed = list(ctx.removed())
1694 linkrev = len(self)
1693 linkrev = len(self)
1695 self.ui.note(_("committing files:\n"))
1694 self.ui.note(_("committing files:\n"))
1696 for f in sorted(ctx.modified() + ctx.added()):
1695 for f in sorted(ctx.modified() + ctx.added()):
1697 self.ui.note(f + "\n")
1696 self.ui.note(f + "\n")
1698 try:
1697 try:
1699 fctx = ctx[f]
1698 fctx = ctx[f]
1700 if fctx is None:
1699 if fctx is None:
1701 removed.append(f)
1700 removed.append(f)
1702 else:
1701 else:
1703 added.append(f)
1702 added.append(f)
1704 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1703 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1705 trp, changed)
1704 trp, changed)
1706 m.setflag(f, fctx.flags())
1705 m.setflag(f, fctx.flags())
1707 except OSError as inst:
1706 except OSError as inst:
1708 self.ui.warn(_("trouble committing %s!\n") % f)
1707 self.ui.warn(_("trouble committing %s!\n") % f)
1709 raise
1708 raise
1710 except IOError as inst:
1709 except IOError as inst:
1711 errcode = getattr(inst, 'errno', errno.ENOENT)
1710 errcode = getattr(inst, 'errno', errno.ENOENT)
1712 if error or errcode and errcode != errno.ENOENT:
1711 if error or errcode and errcode != errno.ENOENT:
1713 self.ui.warn(_("trouble committing %s!\n") % f)
1712 self.ui.warn(_("trouble committing %s!\n") % f)
1714 raise
1713 raise
1715
1714
1716 # update manifest
1715 # update manifest
1717 self.ui.note(_("committing manifest\n"))
1716 self.ui.note(_("committing manifest\n"))
1718 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1717 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1719 drop = [f for f in removed if f in m]
1718 drop = [f for f in removed if f in m]
1720 for f in drop:
1719 for f in drop:
1721 del m[f]
1720 del m[f]
1722 mn = self.manifest.add(m, trp, linkrev,
1721 mn = self.manifest.add(m, trp, linkrev,
1723 p1.manifestnode(), p2.manifestnode(),
1722 p1.manifestnode(), p2.manifestnode(),
1724 added, drop)
1723 added, drop)
1725 files = changed + removed
1724 files = changed + removed
1726 else:
1725 else:
1727 mn = p1.manifestnode()
1726 mn = p1.manifestnode()
1728 files = []
1727 files = []
1729
1728
1730 # update changelog
1729 # update changelog
1731 self.ui.note(_("committing changelog\n"))
1730 self.ui.note(_("committing changelog\n"))
1732 self.changelog.delayupdate(tr)
1731 self.changelog.delayupdate(tr)
1733 n = self.changelog.add(mn, files, ctx.description(),
1732 n = self.changelog.add(mn, files, ctx.description(),
1734 trp, p1.node(), p2.node(),
1733 trp, p1.node(), p2.node(),
1735 user, ctx.date(), ctx.extra().copy())
1734 user, ctx.date(), ctx.extra().copy())
1736 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1735 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1737 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1736 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1738 parent2=xp2)
1737 parent2=xp2)
1739 # set the new commit is proper phase
1738 # set the new commit is proper phase
1740 targetphase = subrepo.newcommitphase(self.ui, ctx)
1739 targetphase = subrepo.newcommitphase(self.ui, ctx)
1741 if targetphase:
1740 if targetphase:
1742 # retract boundary do not alter parent changeset.
1741 # retract boundary do not alter parent changeset.
1743 # if a parent have higher the resulting phase will
1742 # if a parent have higher the resulting phase will
1744 # be compliant anyway
1743 # be compliant anyway
1745 #
1744 #
1746 # if minimal phase was 0 we don't need to retract anything
1745 # if minimal phase was 0 we don't need to retract anything
1747 phases.retractboundary(self, tr, targetphase, [n])
1746 phases.retractboundary(self, tr, targetphase, [n])
1748 tr.close()
1747 tr.close()
1749 branchmap.updatecache(self.filtered('served'))
1748 branchmap.updatecache(self.filtered('served'))
1750 return n
1749 return n
1751 finally:
1750 finally:
1752 if tr:
1751 if tr:
1753 tr.release()
1752 tr.release()
1754 lock.release()
1753 lock.release()
1755
1754
1756 @unfilteredmethod
1755 @unfilteredmethod
1757 def destroying(self):
1756 def destroying(self):
1758 '''Inform the repository that nodes are about to be destroyed.
1757 '''Inform the repository that nodes are about to be destroyed.
1759 Intended for use by strip and rollback, so there's a common
1758 Intended for use by strip and rollback, so there's a common
1760 place for anything that has to be done before destroying history.
1759 place for anything that has to be done before destroying history.
1761
1760
1762 This is mostly useful for saving state that is in memory and waiting
1761 This is mostly useful for saving state that is in memory and waiting
1763 to be flushed when the current lock is released. Because a call to
1762 to be flushed when the current lock is released. Because a call to
1764 destroyed is imminent, the repo will be invalidated causing those
1763 destroyed is imminent, the repo will be invalidated causing those
1765 changes to stay in memory (waiting for the next unlock), or vanish
1764 changes to stay in memory (waiting for the next unlock), or vanish
1766 completely.
1765 completely.
1767 '''
1766 '''
1768 # When using the same lock to commit and strip, the phasecache is left
1767 # When using the same lock to commit and strip, the phasecache is left
1769 # dirty after committing. Then when we strip, the repo is invalidated,
1768 # dirty after committing. Then when we strip, the repo is invalidated,
1770 # causing those changes to disappear.
1769 # causing those changes to disappear.
1771 if '_phasecache' in vars(self):
1770 if '_phasecache' in vars(self):
1772 self._phasecache.write()
1771 self._phasecache.write()
1773
1772
1774 @unfilteredmethod
1773 @unfilteredmethod
1775 def destroyed(self):
1774 def destroyed(self):
1776 '''Inform the repository that nodes have been destroyed.
1775 '''Inform the repository that nodes have been destroyed.
1777 Intended for use by strip and rollback, so there's a common
1776 Intended for use by strip and rollback, so there's a common
1778 place for anything that has to be done after destroying history.
1777 place for anything that has to be done after destroying history.
1779 '''
1778 '''
1780 # When one tries to:
1779 # When one tries to:
1781 # 1) destroy nodes thus calling this method (e.g. strip)
1780 # 1) destroy nodes thus calling this method (e.g. strip)
1782 # 2) use phasecache somewhere (e.g. commit)
1781 # 2) use phasecache somewhere (e.g. commit)
1783 #
1782 #
1784 # then 2) will fail because the phasecache contains nodes that were
1783 # then 2) will fail because the phasecache contains nodes that were
1785 # removed. We can either remove phasecache from the filecache,
1784 # removed. We can either remove phasecache from the filecache,
1786 # causing it to reload next time it is accessed, or simply filter
1785 # causing it to reload next time it is accessed, or simply filter
1787 # the removed nodes now and write the updated cache.
1786 # the removed nodes now and write the updated cache.
1788 self._phasecache.filterunknown(self)
1787 self._phasecache.filterunknown(self)
1789 self._phasecache.write()
1788 self._phasecache.write()
1790
1789
1791 # update the 'served' branch cache to help read only server process
1790 # update the 'served' branch cache to help read only server process
1792 # Thanks to branchcache collaboration this is done from the nearest
1791 # Thanks to branchcache collaboration this is done from the nearest
1793 # filtered subset and it is expected to be fast.
1792 # filtered subset and it is expected to be fast.
1794 branchmap.updatecache(self.filtered('served'))
1793 branchmap.updatecache(self.filtered('served'))
1795
1794
1796 # Ensure the persistent tag cache is updated. Doing it now
1795 # Ensure the persistent tag cache is updated. Doing it now
1797 # means that the tag cache only has to worry about destroyed
1796 # means that the tag cache only has to worry about destroyed
1798 # heads immediately after a strip/rollback. That in turn
1797 # heads immediately after a strip/rollback. That in turn
1799 # guarantees that "cachetip == currenttip" (comparing both rev
1798 # guarantees that "cachetip == currenttip" (comparing both rev
1800 # and node) always means no nodes have been added or destroyed.
1799 # and node) always means no nodes have been added or destroyed.
1801
1800
1802 # XXX this is suboptimal when qrefresh'ing: we strip the current
1801 # XXX this is suboptimal when qrefresh'ing: we strip the current
1803 # head, refresh the tag cache, then immediately add a new head.
1802 # head, refresh the tag cache, then immediately add a new head.
1804 # But I think doing it this way is necessary for the "instant
1803 # But I think doing it this way is necessary for the "instant
1805 # tag cache retrieval" case to work.
1804 # tag cache retrieval" case to work.
1806 self.invalidate()
1805 self.invalidate()
1807
1806
1808 def walk(self, match, node=None):
1807 def walk(self, match, node=None):
1809 '''
1808 '''
1810 walk recursively through the directory tree or a given
1809 walk recursively through the directory tree or a given
1811 changeset, finding all files matched by the match
1810 changeset, finding all files matched by the match
1812 function
1811 function
1813 '''
1812 '''
1814 return self[node].walk(match)
1813 return self[node].walk(match)
1815
1814
1816 def status(self, node1='.', node2=None, match=None,
1815 def status(self, node1='.', node2=None, match=None,
1817 ignored=False, clean=False, unknown=False,
1816 ignored=False, clean=False, unknown=False,
1818 listsubrepos=False):
1817 listsubrepos=False):
1819 '''a convenience method that calls node1.status(node2)'''
1818 '''a convenience method that calls node1.status(node2)'''
1820 return self[node1].status(node2, match, ignored, clean, unknown,
1819 return self[node1].status(node2, match, ignored, clean, unknown,
1821 listsubrepos)
1820 listsubrepos)
1822
1821
1823 def heads(self, start=None):
1822 def heads(self, start=None):
1824 heads = self.changelog.heads(start)
1823 heads = self.changelog.heads(start)
1825 # sort the output in rev descending order
1824 # sort the output in rev descending order
1826 return sorted(heads, key=self.changelog.rev, reverse=True)
1825 return sorted(heads, key=self.changelog.rev, reverse=True)
1827
1826
1828 def branchheads(self, branch=None, start=None, closed=False):
1827 def branchheads(self, branch=None, start=None, closed=False):
1829 '''return a (possibly filtered) list of heads for the given branch
1828 '''return a (possibly filtered) list of heads for the given branch
1830
1829
1831 Heads are returned in topological order, from newest to oldest.
1830 Heads are returned in topological order, from newest to oldest.
1832 If branch is None, use the dirstate branch.
1831 If branch is None, use the dirstate branch.
1833 If start is not None, return only heads reachable from start.
1832 If start is not None, return only heads reachable from start.
1834 If closed is True, return heads that are marked as closed as well.
1833 If closed is True, return heads that are marked as closed as well.
1835 '''
1834 '''
1836 if branch is None:
1835 if branch is None:
1837 branch = self[None].branch()
1836 branch = self[None].branch()
1838 branches = self.branchmap()
1837 branches = self.branchmap()
1839 if branch not in branches:
1838 if branch not in branches:
1840 return []
1839 return []
1841 # the cache returns heads ordered lowest to highest
1840 # the cache returns heads ordered lowest to highest
1842 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1841 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1843 if start is not None:
1842 if start is not None:
1844 # filter out the heads that cannot be reached from startrev
1843 # filter out the heads that cannot be reached from startrev
1845 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1844 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1846 bheads = [h for h in bheads if h in fbheads]
1845 bheads = [h for h in bheads if h in fbheads]
1847 return bheads
1846 return bheads
1848
1847
1849 def branches(self, nodes):
1848 def branches(self, nodes):
1850 if not nodes:
1849 if not nodes:
1851 nodes = [self.changelog.tip()]
1850 nodes = [self.changelog.tip()]
1852 b = []
1851 b = []
1853 for n in nodes:
1852 for n in nodes:
1854 t = n
1853 t = n
1855 while True:
1854 while True:
1856 p = self.changelog.parents(n)
1855 p = self.changelog.parents(n)
1857 if p[1] != nullid or p[0] == nullid:
1856 if p[1] != nullid or p[0] == nullid:
1858 b.append((t, n, p[0], p[1]))
1857 b.append((t, n, p[0], p[1]))
1859 break
1858 break
1860 n = p[0]
1859 n = p[0]
1861 return b
1860 return b
1862
1861
1863 def between(self, pairs):
1862 def between(self, pairs):
1864 r = []
1863 r = []
1865
1864
1866 for top, bottom in pairs:
1865 for top, bottom in pairs:
1867 n, l, i = top, [], 0
1866 n, l, i = top, [], 0
1868 f = 1
1867 f = 1
1869
1868
1870 while n != bottom and n != nullid:
1869 while n != bottom and n != nullid:
1871 p = self.changelog.parents(n)[0]
1870 p = self.changelog.parents(n)[0]
1872 if i == f:
1871 if i == f:
1873 l.append(n)
1872 l.append(n)
1874 f = f * 2
1873 f = f * 2
1875 n = p
1874 n = p
1876 i += 1
1875 i += 1
1877
1876
1878 r.append(l)
1877 r.append(l)
1879
1878
1880 return r
1879 return r
1881
1880
1882 def checkpush(self, pushop):
1881 def checkpush(self, pushop):
1883 """Extensions can override this function if additional checks have
1882 """Extensions can override this function if additional checks have
1884 to be performed before pushing, or call it if they override push
1883 to be performed before pushing, or call it if they override push
1885 command.
1884 command.
1886 """
1885 """
1887 pass
1886 pass
1888
1887
1889 @unfilteredpropertycache
1888 @unfilteredpropertycache
1890 def prepushoutgoinghooks(self):
1889 def prepushoutgoinghooks(self):
1891 """Return util.hooks consists of "(repo, remote, outgoing)"
1890 """Return util.hooks consists of "(repo, remote, outgoing)"
1892 functions, which are called before pushing changesets.
1891 functions, which are called before pushing changesets.
1893 """
1892 """
1894 return util.hooks()
1893 return util.hooks()
1895
1894
1896 def pushkey(self, namespace, key, old, new):
1895 def pushkey(self, namespace, key, old, new):
1897 try:
1896 try:
1898 tr = self.currenttransaction()
1897 tr = self.currenttransaction()
1899 hookargs = {}
1898 hookargs = {}
1900 if tr is not None:
1899 if tr is not None:
1901 hookargs.update(tr.hookargs)
1900 hookargs.update(tr.hookargs)
1902 hookargs['namespace'] = namespace
1901 hookargs['namespace'] = namespace
1903 hookargs['key'] = key
1902 hookargs['key'] = key
1904 hookargs['old'] = old
1903 hookargs['old'] = old
1905 hookargs['new'] = new
1904 hookargs['new'] = new
1906 self.hook('prepushkey', throw=True, **hookargs)
1905 self.hook('prepushkey', throw=True, **hookargs)
1907 except error.HookAbort as exc:
1906 except error.HookAbort as exc:
1908 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1907 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1909 if exc.hint:
1908 if exc.hint:
1910 self.ui.write_err(_("(%s)\n") % exc.hint)
1909 self.ui.write_err(_("(%s)\n") % exc.hint)
1911 return False
1910 return False
1912 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1911 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1913 ret = pushkey.push(self, namespace, key, old, new)
1912 ret = pushkey.push(self, namespace, key, old, new)
1914 def runhook():
1913 def runhook():
1915 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1914 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1916 ret=ret)
1915 ret=ret)
1917 self._afterlock(runhook)
1916 self._afterlock(runhook)
1918 return ret
1917 return ret
1919
1918
1920 def listkeys(self, namespace):
1919 def listkeys(self, namespace):
1921 self.hook('prelistkeys', throw=True, namespace=namespace)
1920 self.hook('prelistkeys', throw=True, namespace=namespace)
1922 self.ui.debug('listing keys for "%s"\n' % namespace)
1921 self.ui.debug('listing keys for "%s"\n' % namespace)
1923 values = pushkey.list(self, namespace)
1922 values = pushkey.list(self, namespace)
1924 self.hook('listkeys', namespace=namespace, values=values)
1923 self.hook('listkeys', namespace=namespace, values=values)
1925 return values
1924 return values
1926
1925
1927 def debugwireargs(self, one, two, three=None, four=None, five=None):
1926 def debugwireargs(self, one, two, three=None, four=None, five=None):
1928 '''used to test argument passing over the wire'''
1927 '''used to test argument passing over the wire'''
1929 return "%s %s %s %s %s" % (one, two, three, four, five)
1928 return "%s %s %s %s %s" % (one, two, three, four, five)
1930
1929
1931 def savecommitmessage(self, text):
1930 def savecommitmessage(self, text):
1932 fp = self.vfs('last-message.txt', 'wb')
1931 fp = self.vfs('last-message.txt', 'wb')
1933 try:
1932 try:
1934 fp.write(text)
1933 fp.write(text)
1935 finally:
1934 finally:
1936 fp.close()
1935 fp.close()
1937 return self.pathto(fp.name[len(self.root) + 1:])
1936 return self.pathto(fp.name[len(self.root) + 1:])
1938
1937
1939 # used to avoid circular references so destructors work
1938 # used to avoid circular references so destructors work
1940 def aftertrans(files):
1939 def aftertrans(files):
1941 renamefiles = [tuple(t) for t in files]
1940 renamefiles = [tuple(t) for t in files]
1942 def a():
1941 def a():
1943 for vfs, src, dest in renamefiles:
1942 for vfs, src, dest in renamefiles:
1944 try:
1943 try:
1945 vfs.rename(src, dest)
1944 vfs.rename(src, dest)
1946 except OSError: # journal file does not yet exist
1945 except OSError: # journal file does not yet exist
1947 pass
1946 pass
1948 return a
1947 return a
1949
1948
1950 def undoname(fn):
1949 def undoname(fn):
1951 base, name = os.path.split(fn)
1950 base, name = os.path.split(fn)
1952 assert name.startswith('journal')
1951 assert name.startswith('journal')
1953 return os.path.join(base, name.replace('journal', 'undo', 1))
1952 return os.path.join(base, name.replace('journal', 'undo', 1))
1954
1953
1955 def instance(ui, path, create):
1954 def instance(ui, path, create):
1956 return localrepository(ui, util.urllocalpath(path), create)
1955 return localrepository(ui, util.urllocalpath(path), create)
1957
1956
1958 def islocal(path):
1957 def islocal(path):
1959 return True
1958 return True
1960
1959
1961 def newreporequirements(repo):
1960 def newreporequirements(repo):
1962 """Determine the set of requirements for a new local repository.
1961 """Determine the set of requirements for a new local repository.
1963
1962
1964 Extensions can wrap this function to specify custom requirements for
1963 Extensions can wrap this function to specify custom requirements for
1965 new repositories.
1964 new repositories.
1966 """
1965 """
1967 ui = repo.ui
1966 ui = repo.ui
1968 requirements = set(['revlogv1'])
1967 requirements = set(['revlogv1'])
1969 if ui.configbool('format', 'usestore', True):
1968 if ui.configbool('format', 'usestore', True):
1970 requirements.add('store')
1969 requirements.add('store')
1971 if ui.configbool('format', 'usefncache', True):
1970 if ui.configbool('format', 'usefncache', True):
1972 requirements.add('fncache')
1971 requirements.add('fncache')
1973 if ui.configbool('format', 'dotencode', True):
1972 if ui.configbool('format', 'dotencode', True):
1974 requirements.add('dotencode')
1973 requirements.add('dotencode')
1975
1974
1976 if scmutil.gdinitconfig(ui):
1975 if scmutil.gdinitconfig(ui):
1977 requirements.add('generaldelta')
1976 requirements.add('generaldelta')
1978 if ui.configbool('experimental', 'treemanifest', False):
1977 if ui.configbool('experimental', 'treemanifest', False):
1979 requirements.add('treemanifest')
1978 requirements.add('treemanifest')
1980 if ui.configbool('experimental', 'manifestv2', False):
1979 if ui.configbool('experimental', 'manifestv2', False):
1981 requirements.add('manifestv2')
1980 requirements.add('manifestv2')
1982
1981
1983 return requirements
1982 return requirements
General Comments 0
You need to be logged in to leave comments. Login now