localrepo: make restoring from backup at rollback avoid ambiguity of file stat...
FUJIWARA Katsunori
r29352:37c7f9fb default
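This commit is part of a series addressing an ambiguity in Mercurial's stat-based cache validation: a file under .hg/ is presumed unchanged while its (size, mtime) stat tuple is unchanged, so rewriting it within the same second without changing its size (as restoring a rollback backup can do) leaves stale caches looking valid. A minimal sketch of the ambiguity and the usual remedy, using hypothetical helper names (background only, not the patch itself):

    import os

    def filestat_is_ambiguous(oldstat, path):
        # A rewrite is "ambiguous" when size and whole-second mtime both
        # match the previous stat, so stat-based validation cannot see it.
        new = os.stat(path)
        return (new.st_size == oldstat.st_size and
                int(new.st_mtime) == int(oldstat.st_mtime))

    def avoid_ambiguity(path, oldstat):
        # Advance mtime by one second (masked to 31 bits) so stat-based
        # cache readers notice the change.
        if filestat_is_ambiguous(oldstat, path):
            advanced = (int(oldstat.st_mtime) + 1) & 0x7fffffff
            os.utime(path, (advanced, advanced))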
@@ -1,1970 +1,1970 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    context,
    dirstate,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    util,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

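# An illustrative, hypothetical sketch (not part of this module) of how the
# unfiltered-cache helpers above are meant to be used by repo code:
#
#     class myrepo(localrepository):
#         @unfilteredpropertycache
#         def _expensiveindex(self):
#             # computed once, stored on the unfiltered repo, and shared
#             # by every filtered view of it
#             return buildindex(self)
#
#     if hasunfilteredcache(repo, '_expensiveindex'):
#         pass  # cached value already exists; access is cheap
#
# 'myrepo', '_expensiveindex' and 'buildindex' are made-up names.
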
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

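# A hedged usage sketch (not part of the original file): callers obtain the
# wire-protocol-shaped view of a local repository through repo.peer(), so
# exchange code can treat local and remote repositories uniformly:
#
#     peer = repo.peer()          # localpeer over repo.filtered('served')
#     node = peer.lookup('tip')
#     heads = peer.heads()
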
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

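    # Illustrative sketch (not in the original file): filtered() builds a
    # repoview-backed proxy, so one repository can be inspected through
    # different filters without copying any state:
    #
    #     served = repo.filtered('served')    # also hides secret changesets
    #     visible = repo.filtered('visible')  # hides obsolescence-hidden ones
    #     assert served.unfiltered() is repo.unfiltered()
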
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Return a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

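    # Hedged usage sketch for revs()/set() (not part of the original file).
    # %-formatting is expanded by revset.formatspec: %d for an int, %s for a
    # string, %ld for a list of ints, etc.:
    #
    #     for rev in repo.revs('ancestors(%d) and not public()', 42):
    #         ...
    #     for ctx in repo.set('%ld', [0, 1, 2]):
    #         ui.write(ctx.hex() + '\n')
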
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

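    # Illustrative sketch (not in the original file): the branchcache returned
    # by branchmap() maps branch names to lists of head nodes, e.g.:
    #
    #     for branch, heads in repo.branchmap().iteritems():
    #         ui.write('%s: %s\n' % (branch, ', '.join(hex(n) for n in heads)))
    #
    # branchtip(), defined below, is the shortcut for a single branch's tip.
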
805 @unfilteredmethod
805 @unfilteredmethod
806 def revbranchcache(self):
806 def revbranchcache(self):
807 if not self._revbranchcache:
807 if not self._revbranchcache:
808 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
808 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
809 return self._revbranchcache
809 return self._revbranchcache
810
810
811 def branchtip(self, branch, ignoremissing=False):
811 def branchtip(self, branch, ignoremissing=False):
812 '''return the tip node for a given branch
812 '''return the tip node for a given branch
813
813
814 If ignoremissing is True, then this method will not raise an error.
814 If ignoremissing is True, then this method will not raise an error.
815 This is helpful for callers that only expect None for a missing branch
815 This is helpful for callers that only expect None for a missing branch
816 (e.g. namespace).
816 (e.g. namespace).
817
817
818 '''
818 '''
819 try:
819 try:
820 return self.branchmap().branchtip(branch)
820 return self.branchmap().branchtip(branch)
821 except KeyError:
821 except KeyError:
822 if not ignoremissing:
822 if not ignoremissing:
823 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
823 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
824 else:
824 else:
825 pass
825 pass
826
826
827 def lookup(self, key):
827 def lookup(self, key):
828 return self[key].node()
828 return self[key].node()
829
829
830 def lookupbranch(self, key, remote=None):
830 def lookupbranch(self, key, remote=None):
831 repo = remote or self
831 repo = remote or self
832 if key in repo.branchmap():
832 if key in repo.branchmap():
833 return key
833 return key
834
834
835 repo = (remote and remote.local()) and remote or self
835 repo = (remote and remote.local()) and remote or self
836 return repo[key].branch()
836 return repo[key].branch()
837
837
838 def known(self, nodes):
838 def known(self, nodes):
839 cl = self.changelog
839 cl = self.changelog
840 nm = cl.nodemap
840 nm = cl.nodemap
841 filtered = cl.filteredrevs
841 filtered = cl.filteredrevs
842 result = []
842 result = []
843 for n in nodes:
843 for n in nodes:
844 r = nm.get(n)
844 r = nm.get(n)
845 resp = not (r is None or r in filtered)
845 resp = not (r is None or r in filtered)
846 result.append(resp)
846 result.append(resp)
847 return result
847 return result
848
848
849 def local(self):
849 def local(self):
850 return self
850 return self
851
851
852 def publishing(self):
852 def publishing(self):
853 # it's safe (and desirable) to trust the publish flag unconditionally
853 # it's safe (and desirable) to trust the publish flag unconditionally
854 # so that we don't finalize changes shared between users via ssh or nfs
854 # so that we don't finalize changes shared between users via ssh or nfs
855 return self.ui.configbool('phases', 'publish', True, untrusted=True)
855 return self.ui.configbool('phases', 'publish', True, untrusted=True)
856
856
857 def cancopy(self):
857 def cancopy(self):
858 # so statichttprepo's override of local() works
858 # so statichttprepo's override of local() works
859 if not self.local():
859 if not self.local():
860 return False
860 return False
861 if not self.publishing():
861 if not self.publishing():
862 return True
862 return True
863 # if publishing we can't copy if there is filtered content
863 # if publishing we can't copy if there is filtered content
864 return not self.filtered('visible').changelog.filteredrevs
864 return not self.filtered('visible').changelog.filteredrevs
865
865
866 def shared(self):
866 def shared(self):
867 '''the type of shared repository (None if not shared)'''
867 '''the type of shared repository (None if not shared)'''
868 if self.sharedpath != self.path:
868 if self.sharedpath != self.path:
869 return 'store'
869 return 'store'
870 return None
870 return None
871
871
872 def join(self, f, *insidef):
872 def join(self, f, *insidef):
873 return self.vfs.join(os.path.join(f, *insidef))
873 return self.vfs.join(os.path.join(f, *insidef))
874
874
875 def wjoin(self, f, *insidef):
875 def wjoin(self, f, *insidef):
876 return self.vfs.reljoin(self.root, f, *insidef)
876 return self.vfs.reljoin(self.root, f, *insidef)
877
877
878 def file(self, f):
878 def file(self, f):
879 if f[0] == '/':
879 if f[0] == '/':
880 f = f[1:]
880 f = f[1:]
881 return filelog.filelog(self.svfs, f)
881 return filelog.filelog(self.svfs, f)
882
882
883 def changectx(self, changeid):
883 def changectx(self, changeid):
884 return self[changeid]
884 return self[changeid]
885
885
886 def setparents(self, p1, p2=nullid):
886 def setparents(self, p1, p2=nullid):
887 self.dirstate.beginparentchange()
887 self.dirstate.beginparentchange()
888 copies = self.dirstate.setparents(p1, p2)
888 copies = self.dirstate.setparents(p1, p2)
889 pctx = self[p1]
889 pctx = self[p1]
890 if copies:
890 if copies:
891 # Adjust copy records, the dirstate cannot do it, it
891 # Adjust copy records, the dirstate cannot do it, it
892 # requires access to parents manifests. Preserve them
892 # requires access to parents manifests. Preserve them
893 # only for entries added to first parent.
893 # only for entries added to first parent.
894 for f in copies:
894 for f in copies:
895 if f not in pctx and copies[f] in pctx:
895 if f not in pctx and copies[f] in pctx:
896 self.dirstate.copy(copies[f], f)
896 self.dirstate.copy(copies[f], f)
897 if p2 == nullid:
897 if p2 == nullid:
898 for f, s in sorted(self.dirstate.copies().items()):
898 for f, s in sorted(self.dirstate.copies().items()):
899 if f not in pctx and s not in pctx:
899 if f not in pctx and s not in pctx:
900 self.dirstate.copy(None, f)
900 self.dirstate.copy(None, f)
901 self.dirstate.endparentchange()
901 self.dirstate.endparentchange()
902
902
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

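    # The filter machinery above is driven by the [encode]/[decode] hgrc
    # sections. A minimal, illustrative sketch (the pattern and command
    # below are hypothetical examples, not defaults shipped with Mercurial):
    #
    #     from mercurial import ui as uimod, hg
    #     u = uimod.ui()
    #     u.setconfig('encode', '**.txt', 'tr a-z A-Z')
    #     repo = hg.repository(u, '/path/to/repo')
    #     pats = repo._loadfilter('encode')  # [(matcher, fn, params), ...]
    #     data = repo._filter(pats, 'notes.txt', 'hello')  # -> 'HELLO'
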
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

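    # Illustrative call (hypothetical path and content; '' writes a plain
    # file, 'l' a symlink, 'x' an executable):
    #
    #     n = repo.wwrite('bin/run.sh', '#!/bin/sh\n', 'x')
    #     # assuming no [decode] filters apply, n == len('#!/bin/sh\n')
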
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                raise RuntimeError('programming error: transaction requires '
                                   'locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic references between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

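    # A sketch of typical transaction usage, mirroring commit() below
    # ('mytxn' is a hypothetical description string; the locks must be
    # acquired first, or the devel check above raises):
    #
    #     wlock = lock = tr = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         tr = repo.transaction('mytxn')
    #         ...                     # mutate the store
    #         tr.close()              # commit the transaction
    #     finally:
    #         lockmod.release(tr, lock, wlock)  # aborts tr if never closed
    #
    # Calling transaction() again while one is running returns tr.nest(),
    # so nested users share the outermost journal.
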
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

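    # On successful close, aftertrans(renames) moves each journal file to
    # its undo counterpart (via undoname), roughly:
    #
    #     .hg/store/journal        -> .hg/store/undo
    #     .hg/journal.dirstate     -> .hg/undo.dirstate
    #     .hg/journal.branch       -> .hg/undo.branch
    #
    # which is what rollback() below later restores from.
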
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

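    # Illustrative use (this is the path behind 'hg recover', e.g. after a
    # killed process left .hg/store/journal behind):
    #
    #     if repo.recover():
    #         ...  # an interrupted transaction was rolled back
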
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
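        # checkambig=True (the change in this revision) roughly means: if
        # the restored file's stat is indistinguishable from the replaced
        # one, advance its mtime so cached readers notice the change. A
        # sketch of the ambiguity being avoided (mtime has whole-second
        # granularity):
        #
        #     'bookmarks' before rollback: size=120, mtime=1000  (cached)
        #     'bookmarks' after rollback:  size=120, mtime=1000  <- would
        #                                  look unchanged without the check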
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

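    # The wait path above honors [ui] timeout; a hypothetical caller that
    # prefers failing fast over the default 600-second wait:
    #
    #     repo.ui.setconfig('ui', 'timeout', '5')
    #     try:
    #         l = repo.lock(wait=True)   # retries for up to 5 seconds
    #     except error.LockHeld:
    #         ...                        # still held elsewhere
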
    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

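    # Sketch of _afterlock() semantics (the callback is hypothetical):
    #
    #     def notify():
    #         repo.ui.status('fully unlocked\n')
    #     repo._afterlock(notify)   # deferred until the outermost held
    #                               # lock is released, or run right away
    #                               # if no lock is held
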
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

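    # The required ordering, as enforced by the devel warning above
    # (acquire 'wlock' before 'lock', release in reverse; release here is
    # lockmod.release):
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         ...
    #     finally:
    #         release(lock, wlock)
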
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

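    # Minimal illustrative call (hypothetical message and user; returns the
    # new changeset node, or None when there was nothing to commit):
    #
    #     node = repo.commit(text='fix parser', user='alice <a@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
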
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

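    # Usage sketch (hypothetical, not part of the original file): walking the
    # working directory for Python files, given an existing localrepository
    # instance 'repo'; matchmod is already imported at the top of this module.
    #
    #   m = matchmod.match(repo.root, '', ['glob:**.py'])
    #   for f in repo.walk(m):
    #       repo.ui.write('%s\n' % f)
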
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

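    # Usage sketch (hypothetical): comparing the working directory against
    # '.'; the result behaves like scmutil.status, with modified/added/
    # removed/deleted/unknown/ignored/clean fields.
    #
    #   st = repo.status(ignored=True, clean=True, unknown=True)
    #   for f in st.modified:
    #       repo.ui.write('M %s\n' % f)
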
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

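    # Usage sketch (hypothetical): printing the open heads of the 'default'
    # branch, newest first; short() comes from the node imports above.
    #
    #   for h in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % short(h))
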
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # walk first parents back to the nearest merge or root and
            # record the linear segment as (tip, node, p1, p2)
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # sample nodes along the first-parent chain from top towards
            # bottom at exponentially growing distances (1, 2, 4, 8, ...)
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

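    # Worked example (illustrative): with a linear history and
    # pairs=[(tip, nullid)], each returned list holds tip's first-parent
    # ancestors at distances 1, 2, 4, 8, ...; this exponential spacing is
    # what the legacy wire-protocol discovery uses to bisect towards the
    # common ancestors.
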
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

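    # Extension sketch (hypothetical): vetoing forced pushes by overriding
    # checkpush in a repo subclass from an extension's reposetup; 'vetorepo'
    # is a made-up name.
    #
    #   def reposetup(ui, repo):
    #       if not repo.local():
    #           return
    #       class vetorepo(repo.__class__):
    #           def checkpush(self, pushop):
    #               super(vetorepo, self).checkpush(pushop)
    #               if pushop.force:
    #                   raise error.Abort('forced pushes are disabled')
    #       repo.__class__ = vetorepo
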
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object; the registered hooks are called with
        a pushop (carrying repo, remote and outgoing) before changesets are
        pushed.
        """
        return util.hooks()

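    # Registration sketch (hypothetical): an extension adding a pre-push
    # check; the hook receives the pushop described in the docstring above.
    #
    #   def checkoutgoing(pushop):
    #       if len(pushop.outgoing.missing) > 100:
    #           pushop.repo.ui.warn('pushing %d changesets\n'
    #                               % len(pushop.outgoing.missing))
    #   repo.prepushoutgoinghooks.add('myext', checkoutgoing)
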
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

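    # Usage sketch (hypothetical): creating a bookmark through the pushkey
    # mechanism, the way a remote peer does during push; an empty 'old'
    # value means the key did not exist before.
    #
    #   ok = repo.pushkey('bookmarks', 'feature-x', '',
    #                     hex(repo['tip'].node()))
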
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

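    # Usage sketch (hypothetical): dumping the 'bookmarks' pushkey namespace.
    #
    #   for name, value in sorted(repo.listkeys('bookmarks').items()):
    #       repo.ui.write('%s %s\n' % (name, value))
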
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

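# Note (descriptive, not from the original file): the closure returned by
# aftertrans is installed as the transaction's 'after' callback, so once the
# transaction closes it renames each journal file to its undo counterpart,
# which is what a later rollback restores from.
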
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

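# For example (illustrative): undoname('.hg/store/journal') returns
# '.hg/store/undo', and undoname('journal.dirstate') returns 'undo.dirstate'.
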
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
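
# Extension sketch (hypothetical): adding a custom requirement to every new
# repository by wrapping the function above; 'exp-myext-storage' and the
# 'myext.enabled' config knob are made-up names.
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       if repo.ui.configbool('myext', 'enabled', False):
#           reqs.add('exp-myext-storage')
#       return reqs
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)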