localrepo: document nodebookmarks
Augie Fackler
r27166:263db313 default
@@ -1,1922 +1,1923 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, wdirrev, short
7 from node import hex, nullid, wdirrev, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset, cmdutil
14 import scmutil, util, extensions, hook, error, revset, cmdutil
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect, random
19 import weakref, errno, os, time, inspect, random
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
25 class repofilecache(filecache):
25 class repofilecache(filecache):
26 """All filecache usage on repo are done for logic that should be unfiltered
26 """All filecache usage on repo are done for logic that should be unfiltered
27 """
27 """
28
28
29 def __get__(self, repo, type=None):
29 def __get__(self, repo, type=None):
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 def __set__(self, repo, value):
31 def __set__(self, repo, value):
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 def __delete__(self, repo):
33 def __delete__(self, repo):
34 return super(repofilecache, self).__delete__(repo.unfiltered())
34 return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
36 class storecache(repofilecache):
36 class storecache(repofilecache):
37 """filecache for files in the store"""
37 """filecache for files in the store"""
38 def join(self, obj, fname):
38 def join(self, obj, fname):
39 return obj.sjoin(fname)
39 return obj.sjoin(fname)
40
40
41 class unfilteredpropertycache(propertycache):
41 class unfilteredpropertycache(propertycache):
42 """propertycache that apply to unfiltered repo only"""
42 """propertycache that apply to unfiltered repo only"""
43
43
44 def __get__(self, repo, type=None):
44 def __get__(self, repo, type=None):
45 unfi = repo.unfiltered()
45 unfi = repo.unfiltered()
46 if unfi is repo:
46 if unfi is repo:
47 return super(unfilteredpropertycache, self).__get__(unfi)
47 return super(unfilteredpropertycache, self).__get__(unfi)
48 return getattr(unfi, self.name)
48 return getattr(unfi, self.name)
49
49
50 class filteredpropertycache(propertycache):
50 class filteredpropertycache(propertycache):
51 """propertycache that must take filtering in account"""
51 """propertycache that must take filtering in account"""
52
52
53 def cachevalue(self, obj, value):
53 def cachevalue(self, obj, value):
54 object.__setattr__(obj, self.name, value)
54 object.__setattr__(obj, self.name, value)
55
55
56
56
57 def hasunfilteredcache(repo, name):
57 def hasunfilteredcache(repo, name):
58 """check if a repo has an unfilteredpropertycache value for <name>"""
58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 return name in vars(repo.unfiltered())
59 return name in vars(repo.unfiltered())
60
60
61 def unfilteredmethod(orig):
61 def unfilteredmethod(orig):
62 """decorate method that always need to be run on unfiltered version"""
62 """decorate method that always need to be run on unfiltered version"""
63 def wrapper(repo, *args, **kwargs):
63 def wrapper(repo, *args, **kwargs):
64 return orig(repo.unfiltered(), *args, **kwargs)
64 return orig(repo.unfiltered(), *args, **kwargs)
65 return wrapper
65 return wrapper
66
66
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 'unbundle'))
68 'unbundle'))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70
70
71 class localpeer(peer.peerrepository):
71 class localpeer(peer.peerrepository):
72 '''peer for a local repo; reflects only the most recent API'''
72 '''peer for a local repo; reflects only the most recent API'''
73
73
74 def __init__(self, repo, caps=moderncaps):
74 def __init__(self, repo, caps=moderncaps):
75 peer.peerrepository.__init__(self)
75 peer.peerrepository.__init__(self)
76 self._repo = repo.filtered('served')
76 self._repo = repo.filtered('served')
77 self.ui = repo.ui
77 self.ui = repo.ui
78 self._caps = repo._restrictcapabilities(caps)
78 self._caps = repo._restrictcapabilities(caps)
79 self.requirements = repo.requirements
79 self.requirements = repo.requirements
80 self.supportedformats = repo.supportedformats
80 self.supportedformats = repo.supportedformats
81
81
82 def close(self):
82 def close(self):
83 self._repo.close()
83 self._repo.close()
84
84
85 def _capabilities(self):
85 def _capabilities(self):
86 return self._caps
86 return self._caps
87
87
88 def local(self):
88 def local(self):
89 return self._repo
89 return self._repo
90
90
91 def canpush(self):
91 def canpush(self):
92 return True
92 return True
93
93
94 def url(self):
94 def url(self):
95 return self._repo.url()
95 return self._repo.url()
96
96
97 def lookup(self, key):
97 def lookup(self, key):
98 return self._repo.lookup(key)
98 return self._repo.lookup(key)
99
99
100 def branchmap(self):
100 def branchmap(self):
101 return self._repo.branchmap()
101 return self._repo.branchmap()
102
102
103 def heads(self):
103 def heads(self):
104 return self._repo.heads()
104 return self._repo.heads()
105
105
106 def known(self, nodes):
106 def known(self, nodes):
107 return self._repo.known(nodes)
107 return self._repo.known(nodes)
108
108
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 **kwargs):
110 **kwargs):
111 cg = exchange.getbundle(self._repo, source, heads=heads,
111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 common=common, bundlecaps=bundlecaps, **kwargs)
112 common=common, bundlecaps=bundlecaps, **kwargs)
113 if bundlecaps is not None and 'HG20' in bundlecaps:
113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 # When requesting a bundle2, getbundle returns a stream to make the
114 # When requesting a bundle2, getbundle returns a stream to make the
115 # wire level function happier. We need to build a proper object
115 # wire level function happier. We need to build a proper object
116 # from it in local peer.
116 # from it in local peer.
117 cg = bundle2.getunbundler(self.ui, cg)
117 cg = bundle2.getunbundler(self.ui, cg)
118 return cg
118 return cg
119
119
120 # TODO We might want to move the next two calls into legacypeer and add
120 # TODO We might want to move the next two calls into legacypeer and add
121 # unbundle instead.
121 # unbundle instead.
122
122
123 def unbundle(self, cg, heads, url):
123 def unbundle(self, cg, heads, url):
124 """apply a bundle on a repo
124 """apply a bundle on a repo
125
125
126 This function handles the repo locking itself."""
126 This function handles the repo locking itself."""
127 try:
127 try:
128 try:
128 try:
129 cg = exchange.readbundle(self.ui, cg, None)
129 cg = exchange.readbundle(self.ui, cg, None)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 if util.safehasattr(ret, 'getchunks'):
131 if util.safehasattr(ret, 'getchunks'):
132 # This is a bundle20 object, turn it into an unbundler.
132 # This is a bundle20 object, turn it into an unbundler.
133 # This little dance should be dropped eventually when the
133 # This little dance should be dropped eventually when the
134 # API is finally improved.
134 # API is finally improved.
135 stream = util.chunkbuffer(ret.getchunks())
135 stream = util.chunkbuffer(ret.getchunks())
136 ret = bundle2.getunbundler(self.ui, stream)
136 ret = bundle2.getunbundler(self.ui, stream)
137 return ret
137 return ret
138 except Exception as exc:
138 except Exception as exc:
139 # If the exception contains output salvaged from a bundle2
139 # If the exception contains output salvaged from a bundle2
140 # reply, we need to make sure it is printed before continuing
140 # reply, we need to make sure it is printed before continuing
141 # to fail. So we build a bundle2 with such output and consume
141 # to fail. So we build a bundle2 with such output and consume
142 # it directly.
142 # it directly.
143 #
143 #
144 # This is not very elegant but allows a "simple" solution for
144 # This is not very elegant but allows a "simple" solution for
145 # issue4594
145 # issue4594
146 output = getattr(exc, '_bundle2salvagedoutput', ())
146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 if output:
147 if output:
148 bundler = bundle2.bundle20(self._repo.ui)
148 bundler = bundle2.bundle20(self._repo.ui)
149 for out in output:
149 for out in output:
150 bundler.addpart(out)
150 bundler.addpart(out)
151 stream = util.chunkbuffer(bundler.getchunks())
151 stream = util.chunkbuffer(bundler.getchunks())
152 b = bundle2.getunbundler(self.ui, stream)
152 b = bundle2.getunbundler(self.ui, stream)
153 bundle2.processbundle(self._repo, b)
153 bundle2.processbundle(self._repo, b)
154 raise
154 raise
155 except error.PushRaced as exc:
155 except error.PushRaced as exc:
156 raise error.ResponseError(_('push failed:'), str(exc))
156 raise error.ResponseError(_('push failed:'), str(exc))
157
157
158 def lock(self):
158 def lock(self):
159 return self._repo.lock()
159 return self._repo.lock()
160
160
161 def addchangegroup(self, cg, source, url):
161 def addchangegroup(self, cg, source, url):
162 return cg.apply(self._repo, source, url)
162 return cg.apply(self._repo, source, url)
163
163
164 def pushkey(self, namespace, key, old, new):
164 def pushkey(self, namespace, key, old, new):
165 return self._repo.pushkey(namespace, key, old, new)
165 return self._repo.pushkey(namespace, key, old, new)
166
166
167 def listkeys(self, namespace):
167 def listkeys(self, namespace):
168 return self._repo.listkeys(namespace)
168 return self._repo.listkeys(namespace)
169
169
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 '''used to test argument passing over the wire'''
171 '''used to test argument passing over the wire'''
172 return "%s %s %s %s %s" % (one, two, three, four, five)
172 return "%s %s %s %s %s" % (one, two, three, four, five)
173
173
174 class locallegacypeer(localpeer):
174 class locallegacypeer(localpeer):
175 '''peer extension which implements legacy methods too; used for tests with
175 '''peer extension which implements legacy methods too; used for tests with
176 restricted capabilities'''
176 restricted capabilities'''
177
177
178 def __init__(self, repo):
178 def __init__(self, repo):
179 localpeer.__init__(self, repo, caps=legacycaps)
179 localpeer.__init__(self, repo, caps=legacycaps)
180
180
181 def branches(self, nodes):
181 def branches(self, nodes):
182 return self._repo.branches(nodes)
182 return self._repo.branches(nodes)
183
183
184 def between(self, pairs):
184 def between(self, pairs):
185 return self._repo.between(pairs)
185 return self._repo.between(pairs)
186
186
187 def changegroup(self, basenodes, source):
187 def changegroup(self, basenodes, source):
188 return changegroup.changegroup(self._repo, basenodes, source)
188 return changegroup.changegroup(self._repo, basenodes, source)
189
189
190 def changegroupsubset(self, bases, heads, source):
190 def changegroupsubset(self, bases, heads, source):
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192
192
193 class localrepository(object):
193 class localrepository(object):
194
194
195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
196 'manifestv2'))
196 'manifestv2'))
197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
198 'dotencode'))
198 'dotencode'))
199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
200 filtername = None
200 filtername = None
201
201
202 # a list of (ui, featureset) functions.
202 # a list of (ui, featureset) functions.
203 # only functions defined in module of enabled extensions are invoked
203 # only functions defined in module of enabled extensions are invoked
204 featuresetupfuncs = set()
204 featuresetupfuncs = set()
205
205
206 def _baserequirements(self, create):
206 def _baserequirements(self, create):
207 return ['revlogv1']
207 return ['revlogv1']
208
208
209 def __init__(self, baseui, path=None, create=False):
209 def __init__(self, baseui, path=None, create=False):
210 self.requirements = set()
210 self.requirements = set()
211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 self.wopener = self.wvfs
212 self.wopener = self.wvfs
213 self.root = self.wvfs.base
213 self.root = self.wvfs.base
214 self.path = self.wvfs.join(".hg")
214 self.path = self.wvfs.join(".hg")
215 self.origroot = path
215 self.origroot = path
216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 self.vfs = scmutil.vfs(self.path)
217 self.vfs = scmutil.vfs(self.path)
218 self.opener = self.vfs
218 self.opener = self.vfs
219 self.baseui = baseui
219 self.baseui = baseui
220 self.ui = baseui.copy()
220 self.ui = baseui.copy()
221 self.ui.copy = baseui.copy # prevent copying repo configuration
221 self.ui.copy = baseui.copy # prevent copying repo configuration
222 # A list of callback to shape the phase if no data were found.
222 # A list of callback to shape the phase if no data were found.
223 # Callback are in the form: func(repo, roots) --> processed root.
223 # Callback are in the form: func(repo, roots) --> processed root.
224 # This list it to be filled by extension during repo setup
224 # This list it to be filled by extension during repo setup
225 self._phasedefaults = []
225 self._phasedefaults = []
226 try:
226 try:
227 self.ui.readconfig(self.join("hgrc"), self.root)
227 self.ui.readconfig(self.join("hgrc"), self.root)
228 extensions.loadall(self.ui)
228 extensions.loadall(self.ui)
229 except IOError:
229 except IOError:
230 pass
230 pass
231
231
232 if self.featuresetupfuncs:
232 if self.featuresetupfuncs:
233 self.supported = set(self._basesupported) # use private copy
233 self.supported = set(self._basesupported) # use private copy
234 extmods = set(m.__name__ for n, m
234 extmods = set(m.__name__ for n, m
235 in extensions.extensions(self.ui))
235 in extensions.extensions(self.ui))
236 for setupfunc in self.featuresetupfuncs:
236 for setupfunc in self.featuresetupfuncs:
237 if setupfunc.__module__ in extmods:
237 if setupfunc.__module__ in extmods:
238 setupfunc(self.ui, self.supported)
238 setupfunc(self.ui, self.supported)
239 else:
239 else:
240 self.supported = self._basesupported
240 self.supported = self._basesupported
241
241
242 if not self.vfs.isdir():
242 if not self.vfs.isdir():
243 if create:
243 if create:
244 if not self.wvfs.exists():
244 if not self.wvfs.exists():
245 self.wvfs.makedirs()
245 self.wvfs.makedirs()
246 self.vfs.makedir(notindexed=True)
246 self.vfs.makedir(notindexed=True)
247 self.requirements.update(self._baserequirements(create))
247 self.requirements.update(self._baserequirements(create))
248 if self.ui.configbool('format', 'usestore', True):
248 if self.ui.configbool('format', 'usestore', True):
249 self.vfs.mkdir("store")
249 self.vfs.mkdir("store")
250 self.requirements.add("store")
250 self.requirements.add("store")
251 if self.ui.configbool('format', 'usefncache', True):
251 if self.ui.configbool('format', 'usefncache', True):
252 self.requirements.add("fncache")
252 self.requirements.add("fncache")
253 if self.ui.configbool('format', 'dotencode', True):
253 if self.ui.configbool('format', 'dotencode', True):
254 self.requirements.add('dotencode')
254 self.requirements.add('dotencode')
255 # create an invalid changelog
255 # create an invalid changelog
256 self.vfs.append(
256 self.vfs.append(
257 "00changelog.i",
257 "00changelog.i",
258 '\0\0\0\2' # represents revlogv2
258 '\0\0\0\2' # represents revlogv2
259 ' dummy changelog to prevent using the old repo layout'
259 ' dummy changelog to prevent using the old repo layout'
260 )
260 )
261 if scmutil.gdinitconfig(self.ui):
261 if scmutil.gdinitconfig(self.ui):
262 self.requirements.add("generaldelta")
262 self.requirements.add("generaldelta")
263 if self.ui.configbool('experimental', 'treemanifest', False):
263 if self.ui.configbool('experimental', 'treemanifest', False):
264 self.requirements.add("treemanifest")
264 self.requirements.add("treemanifest")
265 if self.ui.configbool('experimental', 'manifestv2', False):
265 if self.ui.configbool('experimental', 'manifestv2', False):
266 self.requirements.add("manifestv2")
266 self.requirements.add("manifestv2")
267 else:
267 else:
268 raise error.RepoError(_("repository %s not found") % path)
268 raise error.RepoError(_("repository %s not found") % path)
269 elif create:
269 elif create:
270 raise error.RepoError(_("repository %s already exists") % path)
270 raise error.RepoError(_("repository %s already exists") % path)
271 else:
271 else:
272 try:
272 try:
273 self.requirements = scmutil.readrequires(
273 self.requirements = scmutil.readrequires(
274 self.vfs, self.supported)
274 self.vfs, self.supported)
275 except IOError as inst:
275 except IOError as inst:
276 if inst.errno != errno.ENOENT:
276 if inst.errno != errno.ENOENT:
277 raise
277 raise
278
278
279 self.sharedpath = self.path
279 self.sharedpath = self.path
280 try:
280 try:
281 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
281 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
282 realpath=True)
282 realpath=True)
283 s = vfs.base
283 s = vfs.base
284 if not vfs.exists():
284 if not vfs.exists():
285 raise error.RepoError(
285 raise error.RepoError(
286 _('.hg/sharedpath points to nonexistent directory %s') % s)
286 _('.hg/sharedpath points to nonexistent directory %s') % s)
287 self.sharedpath = s
287 self.sharedpath = s
288 except IOError as inst:
288 except IOError as inst:
289 if inst.errno != errno.ENOENT:
289 if inst.errno != errno.ENOENT:
290 raise
290 raise
291
291
292 self.store = store.store(
292 self.store = store.store(
293 self.requirements, self.sharedpath, scmutil.vfs)
293 self.requirements, self.sharedpath, scmutil.vfs)
294 self.spath = self.store.path
294 self.spath = self.store.path
295 self.svfs = self.store.vfs
295 self.svfs = self.store.vfs
296 self.sjoin = self.store.join
296 self.sjoin = self.store.join
297 self.vfs.createmode = self.store.createmode
297 self.vfs.createmode = self.store.createmode
298 self._applyopenerreqs()
298 self._applyopenerreqs()
299 if create:
299 if create:
300 self._writerequirements()
300 self._writerequirements()
301
301
302 self._dirstatevalidatewarned = False
302 self._dirstatevalidatewarned = False
303
303
304 self._branchcaches = {}
304 self._branchcaches = {}
305 self._revbranchcache = None
305 self._revbranchcache = None
306 self.filterpats = {}
306 self.filterpats = {}
307 self._datafilters = {}
307 self._datafilters = {}
308 self._transref = self._lockref = self._wlockref = None
308 self._transref = self._lockref = self._wlockref = None
309
309
310 # A cache for various files under .hg/ that tracks file changes,
310 # A cache for various files under .hg/ that tracks file changes,
311 # (used by the filecache decorator)
311 # (used by the filecache decorator)
312 #
312 #
313 # Maps a property name to its util.filecacheentry
313 # Maps a property name to its util.filecacheentry
314 self._filecache = {}
314 self._filecache = {}
315
315
316 # hold sets of revision to be filtered
316 # hold sets of revision to be filtered
317 # should be cleared when something might have changed the filter value:
317 # should be cleared when something might have changed the filter value:
318 # - new changesets,
318 # - new changesets,
319 # - phase change,
319 # - phase change,
320 # - new obsolescence marker,
320 # - new obsolescence marker,
321 # - working directory parent change,
321 # - working directory parent change,
322 # - bookmark changes
322 # - bookmark changes
323 self.filteredrevcache = {}
323 self.filteredrevcache = {}
324
324
325 # generic mapping between names and nodes
325 # generic mapping between names and nodes
326 self.names = namespaces.namespaces()
326 self.names = namespaces.namespaces()
327
327
328 def close(self):
328 def close(self):
329 self._writecaches()
329 self._writecaches()
330
330
331 def _writecaches(self):
331 def _writecaches(self):
332 if self._revbranchcache:
332 if self._revbranchcache:
333 self._revbranchcache.write()
333 self._revbranchcache.write()
334
334
335 def _restrictcapabilities(self, caps):
335 def _restrictcapabilities(self, caps):
336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
337 caps = set(caps)
337 caps = set(caps)
338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
339 caps.add('bundle2=' + urllib.quote(capsblob))
339 caps.add('bundle2=' + urllib.quote(capsblob))
340 return caps
340 return caps
341
341
342 def _applyopenerreqs(self):
342 def _applyopenerreqs(self):
343 self.svfs.options = dict((r, 1) for r in self.requirements
343 self.svfs.options = dict((r, 1) for r in self.requirements
344 if r in self.openerreqs)
344 if r in self.openerreqs)
345 # experimental config: format.chunkcachesize
345 # experimental config: format.chunkcachesize
346 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
346 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
347 if chunkcachesize is not None:
347 if chunkcachesize is not None:
348 self.svfs.options['chunkcachesize'] = chunkcachesize
348 self.svfs.options['chunkcachesize'] = chunkcachesize
349 # experimental config: format.maxchainlen
349 # experimental config: format.maxchainlen
350 maxchainlen = self.ui.configint('format', 'maxchainlen')
350 maxchainlen = self.ui.configint('format', 'maxchainlen')
351 if maxchainlen is not None:
351 if maxchainlen is not None:
352 self.svfs.options['maxchainlen'] = maxchainlen
352 self.svfs.options['maxchainlen'] = maxchainlen
353 # experimental config: format.manifestcachesize
353 # experimental config: format.manifestcachesize
354 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
354 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
355 if manifestcachesize is not None:
355 if manifestcachesize is not None:
356 self.svfs.options['manifestcachesize'] = manifestcachesize
356 self.svfs.options['manifestcachesize'] = manifestcachesize
357 # experimental config: format.aggressivemergedeltas
357 # experimental config: format.aggressivemergedeltas
358 aggressivemergedeltas = self.ui.configbool('format',
358 aggressivemergedeltas = self.ui.configbool('format',
359 'aggressivemergedeltas', False)
359 'aggressivemergedeltas', False)
360 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
360 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
361 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
361 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
362
362
363 def _writerequirements(self):
363 def _writerequirements(self):
364 scmutil.writerequires(self.vfs, self.requirements)
364 scmutil.writerequires(self.vfs, self.requirements)
365
365
366 def _checknested(self, path):
366 def _checknested(self, path):
367 """Determine if path is a legal nested repository."""
367 """Determine if path is a legal nested repository."""
368 if not path.startswith(self.root):
368 if not path.startswith(self.root):
369 return False
369 return False
370 subpath = path[len(self.root) + 1:]
370 subpath = path[len(self.root) + 1:]
371 normsubpath = util.pconvert(subpath)
371 normsubpath = util.pconvert(subpath)
372
372
373 # XXX: Checking against the current working copy is wrong in
373 # XXX: Checking against the current working copy is wrong in
374 # the sense that it can reject things like
374 # the sense that it can reject things like
375 #
375 #
376 # $ hg cat -r 10 sub/x.txt
376 # $ hg cat -r 10 sub/x.txt
377 #
377 #
378 # if sub/ is no longer a subrepository in the working copy
378 # if sub/ is no longer a subrepository in the working copy
379 # parent revision.
379 # parent revision.
380 #
380 #
381 # However, it can of course also allow things that would have
381 # However, it can of course also allow things that would have
382 # been rejected before, such as the above cat command if sub/
382 # been rejected before, such as the above cat command if sub/
383 # is a subrepository now, but was a normal directory before.
383 # is a subrepository now, but was a normal directory before.
384 # The old path auditor would have rejected by mistake since it
384 # The old path auditor would have rejected by mistake since it
385 # panics when it sees sub/.hg/.
385 # panics when it sees sub/.hg/.
386 #
386 #
387 # All in all, checking against the working copy seems sensible
387 # All in all, checking against the working copy seems sensible
388 # since we want to prevent access to nested repositories on
388 # since we want to prevent access to nested repositories on
389 # the filesystem *now*.
389 # the filesystem *now*.
390 ctx = self[None]
390 ctx = self[None]
391 parts = util.splitpath(subpath)
391 parts = util.splitpath(subpath)
392 while parts:
392 while parts:
393 prefix = '/'.join(parts)
393 prefix = '/'.join(parts)
394 if prefix in ctx.substate:
394 if prefix in ctx.substate:
395 if prefix == normsubpath:
395 if prefix == normsubpath:
396 return True
396 return True
397 else:
397 else:
398 sub = ctx.sub(prefix)
398 sub = ctx.sub(prefix)
399 return sub.checknested(subpath[len(prefix) + 1:])
399 return sub.checknested(subpath[len(prefix) + 1:])
400 else:
400 else:
401 parts.pop()
401 parts.pop()
402 return False
402 return False
403
403
404 def peer(self):
404 def peer(self):
405 return localpeer(self) # not cached to avoid reference cycle
405 return localpeer(self) # not cached to avoid reference cycle
406
406
407 def unfiltered(self):
407 def unfiltered(self):
408 """Return unfiltered version of the repository
408 """Return unfiltered version of the repository
409
409
410 Intended to be overwritten by filtered repo."""
410 Intended to be overwritten by filtered repo."""
411 return self
411 return self
412
412
413 def filtered(self, name):
413 def filtered(self, name):
414 """Return a filtered version of a repository"""
414 """Return a filtered version of a repository"""
415 # build a new class with the mixin and the current class
415 # build a new class with the mixin and the current class
416 # (possibly subclass of the repo)
416 # (possibly subclass of the repo)
417 class proxycls(repoview.repoview, self.unfiltered().__class__):
417 class proxycls(repoview.repoview, self.unfiltered().__class__):
418 pass
418 pass
419 return proxycls(self, name)
419 return proxycls(self, name)
420
420
421 @repofilecache('bookmarks')
421 @repofilecache('bookmarks')
422 def _bookmarks(self):
422 def _bookmarks(self):
423 return bookmarks.bmstore(self)
423 return bookmarks.bmstore(self)
424
424
425 @repofilecache('bookmarks.current')
425 @repofilecache('bookmarks.current')
426 def _activebookmark(self):
426 def _activebookmark(self):
427 return bookmarks.readactive(self)
427 return bookmarks.readactive(self)
428
428
429 def bookmarkheads(self, bookmark):
429 def bookmarkheads(self, bookmark):
430 name = bookmark.split('@', 1)[0]
430 name = bookmark.split('@', 1)[0]
431 heads = []
431 heads = []
432 for mark, n in self._bookmarks.iteritems():
432 for mark, n in self._bookmarks.iteritems():
433 if mark.split('@', 1)[0] == name:
433 if mark.split('@', 1)[0] == name:
434 heads.append(n)
434 heads.append(n)
435 return heads
435 return heads
436
436
437 # _phaserevs and _phasesets depend on changelog. what we need is to
437 # _phaserevs and _phasesets depend on changelog. what we need is to
438 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
438 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
439 # can't be easily expressed in filecache mechanism.
439 # can't be easily expressed in filecache mechanism.
440 @storecache('phaseroots', '00changelog.i')
440 @storecache('phaseroots', '00changelog.i')
441 def _phasecache(self):
441 def _phasecache(self):
442 return phases.phasecache(self, self._phasedefaults)
442 return phases.phasecache(self, self._phasedefaults)
443
443
444 @storecache('obsstore')
444 @storecache('obsstore')
445 def obsstore(self):
445 def obsstore(self):
446 # read default format for new obsstore.
446 # read default format for new obsstore.
447 # developer config: format.obsstore-version
447 # developer config: format.obsstore-version
448 defaultformat = self.ui.configint('format', 'obsstore-version', None)
448 defaultformat = self.ui.configint('format', 'obsstore-version', None)
449 # rely on obsstore class default when possible.
449 # rely on obsstore class default when possible.
450 kwargs = {}
450 kwargs = {}
451 if defaultformat is not None:
451 if defaultformat is not None:
452 kwargs['defaultformat'] = defaultformat
452 kwargs['defaultformat'] = defaultformat
453 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
453 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
454 store = obsolete.obsstore(self.svfs, readonly=readonly,
454 store = obsolete.obsstore(self.svfs, readonly=readonly,
455 **kwargs)
455 **kwargs)
456 if store and readonly:
456 if store and readonly:
457 self.ui.warn(
457 self.ui.warn(
458 _('obsolete feature not enabled but %i markers found!\n')
458 _('obsolete feature not enabled but %i markers found!\n')
459 % len(list(store)))
459 % len(list(store)))
460 return store
460 return store
461
461
462 @storecache('00changelog.i')
462 @storecache('00changelog.i')
463 def changelog(self):
463 def changelog(self):
464 c = changelog.changelog(self.svfs)
464 c = changelog.changelog(self.svfs)
465 if 'HG_PENDING' in os.environ:
465 if 'HG_PENDING' in os.environ:
466 p = os.environ['HG_PENDING']
466 p = os.environ['HG_PENDING']
467 if p.startswith(self.root):
467 if p.startswith(self.root):
468 c.readpending('00changelog.i.a')
468 c.readpending('00changelog.i.a')
469 return c
469 return c
470
470
471 @storecache('00manifest.i')
471 @storecache('00manifest.i')
472 def manifest(self):
472 def manifest(self):
473 return manifest.manifest(self.svfs)
473 return manifest.manifest(self.svfs)
474
474
475 def dirlog(self, dir):
475 def dirlog(self, dir):
476 return self.manifest.dirlog(dir)
476 return self.manifest.dirlog(dir)
477
477
478 @repofilecache('dirstate')
478 @repofilecache('dirstate')
479 def dirstate(self):
479 def dirstate(self):
480 return dirstate.dirstate(self.vfs, self.ui, self.root,
480 return dirstate.dirstate(self.vfs, self.ui, self.root,
481 self._dirstatevalidate)
481 self._dirstatevalidate)
482
482
483 def _dirstatevalidate(self, node):
483 def _dirstatevalidate(self, node):
484 try:
484 try:
485 self.changelog.rev(node)
485 self.changelog.rev(node)
486 return node
486 return node
487 except error.LookupError:
487 except error.LookupError:
488 if not self._dirstatevalidatewarned:
488 if not self._dirstatevalidatewarned:
489 self._dirstatevalidatewarned = True
489 self._dirstatevalidatewarned = True
490 self.ui.warn(_("warning: ignoring unknown"
490 self.ui.warn(_("warning: ignoring unknown"
491 " working parent %s!\n") % short(node))
491 " working parent %s!\n") % short(node))
492 return nullid
492 return nullid
493
493
494 def __getitem__(self, changeid):
494 def __getitem__(self, changeid):
495 if changeid is None or changeid == wdirrev:
495 if changeid is None or changeid == wdirrev:
496 return context.workingctx(self)
496 return context.workingctx(self)
497 if isinstance(changeid, slice):
497 if isinstance(changeid, slice):
498 return [context.changectx(self, i)
498 return [context.changectx(self, i)
499 for i in xrange(*changeid.indices(len(self)))
499 for i in xrange(*changeid.indices(len(self)))
500 if i not in self.changelog.filteredrevs]
500 if i not in self.changelog.filteredrevs]
501 return context.changectx(self, changeid)
501 return context.changectx(self, changeid)
502
502
503 def __contains__(self, changeid):
503 def __contains__(self, changeid):
504 try:
504 try:
505 self[changeid]
505 self[changeid]
506 return True
506 return True
507 except error.RepoLookupError:
507 except error.RepoLookupError:
508 return False
508 return False
509
509
510 def __nonzero__(self):
510 def __nonzero__(self):
511 return True
511 return True
512
512
513 def __len__(self):
513 def __len__(self):
514 return len(self.changelog)
514 return len(self.changelog)
515
515
516 def __iter__(self):
516 def __iter__(self):
517 return iter(self.changelog)
517 return iter(self.changelog)
518
518
519 def revs(self, expr, *args):
519 def revs(self, expr, *args):
520 '''Find revisions matching a revset.
520 '''Find revisions matching a revset.
521
521
522 The revset is specified as a string ``expr`` that may contain
522 The revset is specified as a string ``expr`` that may contain
523 %-formatting to escape certain types. See ``revset.formatspec``.
523 %-formatting to escape certain types. See ``revset.formatspec``.
524
524
525 Return a revset.abstractsmartset, which is a list-like interface
525 Return a revset.abstractsmartset, which is a list-like interface
526 that contains integer revisions.
526 that contains integer revisions.
527 '''
527 '''
528 expr = revset.formatspec(expr, *args)
528 expr = revset.formatspec(expr, *args)
529 m = revset.match(None, expr)
529 m = revset.match(None, expr)
530 return m(self)
530 return m(self)
531
531
532 def set(self, expr, *args):
532 def set(self, expr, *args):
533 '''Find revisions matching a revset and emit changectx instances.
533 '''Find revisions matching a revset and emit changectx instances.
534
534
535 This is a convenience wrapper around ``revs()`` that iterates the
535 This is a convenience wrapper around ``revs()`` that iterates the
536 result and is a generator of changectx instances.
536 result and is a generator of changectx instances.
537 '''
537 '''
538 for r in self.revs(expr, *args):
538 for r in self.revs(expr, *args):
539 yield self[r]
539 yield self[r]
540
540
541 def url(self):
541 def url(self):
542 return 'file:' + self.root
542 return 'file:' + self.root
543
543
544 def hook(self, name, throw=False, **args):
544 def hook(self, name, throw=False, **args):
545 """Call a hook, passing this repo instance.
545 """Call a hook, passing this repo instance.
546
546
547 This a convenience method to aid invoking hooks. Extensions likely
547 This a convenience method to aid invoking hooks. Extensions likely
548 won't call this unless they have registered a custom hook or are
548 won't call this unless they have registered a custom hook or are
549 replacing code that is expected to call a hook.
549 replacing code that is expected to call a hook.
550 """
550 """
551 return hook.hook(self.ui, self, name, throw, **args)
551 return hook.hook(self.ui, self, name, throw, **args)
552
552
553 @unfilteredmethod
553 @unfilteredmethod
554 def _tag(self, names, node, message, local, user, date, extra=None,
554 def _tag(self, names, node, message, local, user, date, extra=None,
555 editor=False):
555 editor=False):
556 if isinstance(names, str):
556 if isinstance(names, str):
557 names = (names,)
557 names = (names,)
558
558
559 branches = self.branchmap()
559 branches = self.branchmap()
560 for name in names:
560 for name in names:
561 self.hook('pretag', throw=True, node=hex(node), tag=name,
561 self.hook('pretag', throw=True, node=hex(node), tag=name,
562 local=local)
562 local=local)
563 if name in branches:
563 if name in branches:
564 self.ui.warn(_("warning: tag %s conflicts with existing"
564 self.ui.warn(_("warning: tag %s conflicts with existing"
565 " branch name\n") % name)
565 " branch name\n") % name)
566
566
567 def writetags(fp, names, munge, prevtags):
567 def writetags(fp, names, munge, prevtags):
568 fp.seek(0, 2)
568 fp.seek(0, 2)
569 if prevtags and prevtags[-1] != '\n':
569 if prevtags and prevtags[-1] != '\n':
570 fp.write('\n')
570 fp.write('\n')
571 for name in names:
571 for name in names:
572 if munge:
572 if munge:
573 m = munge(name)
573 m = munge(name)
574 else:
574 else:
575 m = name
575 m = name
576
576
577 if (self._tagscache.tagtypes and
577 if (self._tagscache.tagtypes and
578 name in self._tagscache.tagtypes):
578 name in self._tagscache.tagtypes):
579 old = self.tags().get(name, nullid)
579 old = self.tags().get(name, nullid)
580 fp.write('%s %s\n' % (hex(old), m))
580 fp.write('%s %s\n' % (hex(old), m))
581 fp.write('%s %s\n' % (hex(node), m))
581 fp.write('%s %s\n' % (hex(node), m))
582 fp.close()
582 fp.close()
583
583
584 prevtags = ''
584 prevtags = ''
585 if local:
585 if local:
586 try:
586 try:
587 fp = self.vfs('localtags', 'r+')
587 fp = self.vfs('localtags', 'r+')
588 except IOError:
588 except IOError:
589 fp = self.vfs('localtags', 'a')
589 fp = self.vfs('localtags', 'a')
590 else:
590 else:
591 prevtags = fp.read()
591 prevtags = fp.read()
592
592
593 # local tags are stored in the current charset
593 # local tags are stored in the current charset
594 writetags(fp, names, None, prevtags)
594 writetags(fp, names, None, prevtags)
595 for name in names:
595 for name in names:
596 self.hook('tag', node=hex(node), tag=name, local=local)
596 self.hook('tag', node=hex(node), tag=name, local=local)
597 return
597 return
598
598
599 try:
599 try:
600 fp = self.wfile('.hgtags', 'rb+')
600 fp = self.wfile('.hgtags', 'rb+')
601 except IOError as e:
601 except IOError as e:
602 if e.errno != errno.ENOENT:
602 if e.errno != errno.ENOENT:
603 raise
603 raise
604 fp = self.wfile('.hgtags', 'ab')
604 fp = self.wfile('.hgtags', 'ab')
605 else:
605 else:
606 prevtags = fp.read()
606 prevtags = fp.read()
607
607
608 # committed tags are stored in UTF-8
608 # committed tags are stored in UTF-8
609 writetags(fp, names, encoding.fromlocal, prevtags)
609 writetags(fp, names, encoding.fromlocal, prevtags)
610
610
611 fp.close()
611 fp.close()
612
612
613 self.invalidatecaches()
613 self.invalidatecaches()
614
614
615 if '.hgtags' not in self.dirstate:
615 if '.hgtags' not in self.dirstate:
616 self[None].add(['.hgtags'])
616 self[None].add(['.hgtags'])
617
617
618 m = matchmod.exact(self.root, '', ['.hgtags'])
618 m = matchmod.exact(self.root, '', ['.hgtags'])
619 tagnode = self.commit(message, user, date, extra=extra, match=m,
619 tagnode = self.commit(message, user, date, extra=extra, match=m,
620 editor=editor)
620 editor=editor)
621
621
622 for name in names:
622 for name in names:
623 self.hook('tag', node=hex(node), tag=name, local=local)
623 self.hook('tag', node=hex(node), tag=name, local=local)
624
624
625 return tagnode
625 return tagnode
626
626
627 def tag(self, names, node, message, local, user, date, editor=False):
627 def tag(self, names, node, message, local, user, date, editor=False):
628 '''tag a revision with one or more symbolic names.
628 '''tag a revision with one or more symbolic names.
629
629
630 names is a list of strings or, when adding a single tag, names may be a
630 names is a list of strings or, when adding a single tag, names may be a
631 string.
631 string.
632
632
633 if local is True, the tags are stored in a per-repository file.
633 if local is True, the tags are stored in a per-repository file.
634 otherwise, they are stored in the .hgtags file, and a new
634 otherwise, they are stored in the .hgtags file, and a new
635 changeset is committed with the change.
635 changeset is committed with the change.
636
636
637 keyword arguments:
637 keyword arguments:
638
638
639 local: whether to store tags in non-version-controlled file
639 local: whether to store tags in non-version-controlled file
640 (default False)
640 (default False)
641
641
642 message: commit message to use if committing
642 message: commit message to use if committing
643
643
644 user: name of user to use if committing
644 user: name of user to use if committing
645
645
646 date: date tuple to use if committing'''
646 date: date tuple to use if committing'''
647
647
648 if not local:
648 if not local:
649 m = matchmod.exact(self.root, '', ['.hgtags'])
649 m = matchmod.exact(self.root, '', ['.hgtags'])
650 if any(self.status(match=m, unknown=True, ignored=True)):
650 if any(self.status(match=m, unknown=True, ignored=True)):
651 raise error.Abort(_('working copy of .hgtags is changed'),
651 raise error.Abort(_('working copy of .hgtags is changed'),
652 hint=_('please commit .hgtags manually'))
652 hint=_('please commit .hgtags manually'))
653
653
654 self.tags() # instantiate the cache
654 self.tags() # instantiate the cache
655 self._tag(names, node, message, local, user, date, editor=editor)
655 self._tag(names, node, message, local, user, date, editor=editor)
656
656
657 @filteredpropertycache
657 @filteredpropertycache
658 def _tagscache(self):
658 def _tagscache(self):
659 '''Returns a tagscache object that contains various tags related
659 '''Returns a tagscache object that contains various tags related
660 caches.'''
660 caches.'''
661
661
662 # This simplifies its cache management by having one decorated
662 # This simplifies its cache management by having one decorated
663 # function (this one) and the rest simply fetch things from it.
663 # function (this one) and the rest simply fetch things from it.
664 class tagscache(object):
664 class tagscache(object):
665 def __init__(self):
665 def __init__(self):
666 # These two define the set of tags for this repository. tags
666 # These two define the set of tags for this repository. tags
667 # maps tag name to node; tagtypes maps tag name to 'global' or
667 # maps tag name to node; tagtypes maps tag name to 'global' or
668 # 'local'. (Global tags are defined by .hgtags across all
668 # 'local'. (Global tags are defined by .hgtags across all
669 # heads, and local tags are defined in .hg/localtags.)
669 # heads, and local tags are defined in .hg/localtags.)
670 # They constitute the in-memory cache of tags.
670 # They constitute the in-memory cache of tags.
671 self.tags = self.tagtypes = None
671 self.tags = self.tagtypes = None
672
672
673 self.nodetagscache = self.tagslist = None
673 self.nodetagscache = self.tagslist = None
674
674
675 cache = tagscache()
675 cache = tagscache()
676 cache.tags, cache.tagtypes = self._findtags()
676 cache.tags, cache.tagtypes = self._findtags()
677
677
678 return cache
678 return cache
679
679
680 def tags(self):
680 def tags(self):
681 '''return a mapping of tag to node'''
681 '''return a mapping of tag to node'''
682 t = {}
682 t = {}
683 if self.changelog.filteredrevs:
683 if self.changelog.filteredrevs:
684 tags, tt = self._findtags()
684 tags, tt = self._findtags()
685 else:
685 else:
686 tags = self._tagscache.tags
686 tags = self._tagscache.tags
687 for k, v in tags.iteritems():
687 for k, v in tags.iteritems():
688 try:
688 try:
689 # ignore tags to unknown nodes
689 # ignore tags to unknown nodes
690 self.changelog.rev(v)
690 self.changelog.rev(v)
691 t[k] = v
691 t[k] = v
692 except (error.LookupError, ValueError):
692 except (error.LookupError, ValueError):
693 pass
693 pass
694 return t
694 return t
695
695
696 def _findtags(self):
696 def _findtags(self):
697 '''Do the hard work of finding tags. Return a pair of dicts
697 '''Do the hard work of finding tags. Return a pair of dicts
698 (tags, tagtypes) where tags maps tag name to node, and tagtypes
698 (tags, tagtypes) where tags maps tag name to node, and tagtypes
699 maps tag name to a string like \'global\' or \'local\'.
699 maps tag name to a string like \'global\' or \'local\'.
700 Subclasses or extensions are free to add their own tags, but
700 Subclasses or extensions are free to add their own tags, but
701 should be aware that the returned dicts will be retained for the
701 should be aware that the returned dicts will be retained for the
702 duration of the localrepo object.'''
702 duration of the localrepo object.'''
703
703
704 # XXX what tagtype should subclasses/extensions use? Currently
704 # XXX what tagtype should subclasses/extensions use? Currently
705 # mq and bookmarks add tags, but do not set the tagtype at all.
705 # mq and bookmarks add tags, but do not set the tagtype at all.
706 # Should each extension invent its own tag type? Should there
706 # Should each extension invent its own tag type? Should there
707 # be one tagtype for all such "virtual" tags? Or is the status
707 # be one tagtype for all such "virtual" tags? Or is the status
708 # quo fine?
708 # quo fine?
709
709
710 alltags = {} # map tag name to (node, hist)
710 alltags = {} # map tag name to (node, hist)
711 tagtypes = {}
711 tagtypes = {}
712
712
713 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
713 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
714 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
714 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
715
715
716 # Build the return dicts. Have to re-encode tag names because
716 # Build the return dicts. Have to re-encode tag names because
717 # the tags module always uses UTF-8 (in order not to lose info
717 # the tags module always uses UTF-8 (in order not to lose info
718 # writing to the cache), but the rest of Mercurial wants them in
718 # writing to the cache), but the rest of Mercurial wants them in
719 # local encoding.
719 # local encoding.
720 tags = {}
720 tags = {}
721 for (name, (node, hist)) in alltags.iteritems():
721 for (name, (node, hist)) in alltags.iteritems():
722 if node != nullid:
722 if node != nullid:
723 tags[encoding.tolocal(name)] = node
723 tags[encoding.tolocal(name)] = node
724 tags['tip'] = self.changelog.tip()
724 tags['tip'] = self.changelog.tip()
725 tagtypes = dict([(encoding.tolocal(name), value)
725 tagtypes = dict([(encoding.tolocal(name), value)
726 for (name, value) in tagtypes.iteritems()])
726 for (name, value) in tagtypes.iteritems()])
727 return (tags, tagtypes)
727 return (tags, tagtypes)
728
728
729 def tagtype(self, tagname):
729 def tagtype(self, tagname):
730 '''
730 '''
731 return the type of the given tag. result can be:
731 return the type of the given tag. result can be:
732
732
733 'local' : a local tag
733 'local' : a local tag
734 'global' : a global tag
734 'global' : a global tag
735 None : tag does not exist
735 None : tag does not exist
736 '''
736 '''
737
737
738 return self._tagscache.tagtypes.get(tagname)
738 return self._tagscache.tagtypes.get(tagname)
739
739
740 def tagslist(self):
740 def tagslist(self):
741 '''return a list of tags ordered by revision'''
741 '''return a list of tags ordered by revision'''
742 if not self._tagscache.tagslist:
742 if not self._tagscache.tagslist:
743 l = []
743 l = []
744 for t, n in self.tags().iteritems():
744 for t, n in self.tags().iteritems():
745 l.append((self.changelog.rev(n), t, n))
745 l.append((self.changelog.rev(n), t, n))
746 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
746 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
747
747
748 return self._tagscache.tagslist
748 return self._tagscache.tagslist
749
749
750 def nodetags(self, node):
750 def nodetags(self, node):
751 '''return the tags associated with a node'''
751 '''return the tags associated with a node'''
752 if not self._tagscache.nodetagscache:
752 if not self._tagscache.nodetagscache:
753 nodetagscache = {}
753 nodetagscache = {}
754 for t, n in self._tagscache.tags.iteritems():
754 for t, n in self._tagscache.tags.iteritems():
755 nodetagscache.setdefault(n, []).append(t)
755 nodetagscache.setdefault(n, []).append(t)
756 for tags in nodetagscache.itervalues():
756 for tags in nodetagscache.itervalues():
757 tags.sort()
757 tags.sort()
758 self._tagscache.nodetagscache = nodetagscache
758 self._tagscache.nodetagscache = nodetagscache
759 return self._tagscache.nodetagscache.get(node, [])
759 return self._tagscache.nodetagscache.get(node, [])
760
760
761 def nodebookmarks(self, node):
761 def nodebookmarks(self, node):
762 """return the list of bookmarks pointing to the specified node"""
762 marks = []
763 marks = []
763 for bookmark, n in self._bookmarks.iteritems():
764 for bookmark, n in self._bookmarks.iteritems():
764 if n == node:
765 if n == node:
765 marks.append(bookmark)
766 marks.append(bookmark)
766 return sorted(marks)
767 return sorted(marks)
767
768
768 def branchmap(self):
769 def branchmap(self):
769 '''returns a dictionary {branch: [branchheads]} with branchheads
770 '''returns a dictionary {branch: [branchheads]} with branchheads
770 ordered by increasing revision number'''
771 ordered by increasing revision number'''
771 branchmap.updatecache(self)
772 branchmap.updatecache(self)
772 return self._branchcaches[self.filtername]
773 return self._branchcaches[self.filtername]
773
774
774 @unfilteredmethod
775 @unfilteredmethod
775 def revbranchcache(self):
776 def revbranchcache(self):
776 if not self._revbranchcache:
777 if not self._revbranchcache:
777 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
778 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
778 return self._revbranchcache
779 return self._revbranchcache
779
780
780 def branchtip(self, branch, ignoremissing=False):
781 def branchtip(self, branch, ignoremissing=False):
781 '''return the tip node for a given branch
782 '''return the tip node for a given branch
782
783
783 If ignoremissing is True, then this method will not raise an error.
784 If ignoremissing is True, then this method will not raise an error.
784 This is helpful for callers that only expect None for a missing branch
785 This is helpful for callers that only expect None for a missing branch
785 (e.g. namespace).
786 (e.g. namespace).
786
787
787 '''
788 '''
788 try:
789 try:
789 return self.branchmap().branchtip(branch)
790 return self.branchmap().branchtip(branch)
790 except KeyError:
791 except KeyError:
791 if not ignoremissing:
792 if not ignoremissing:
792 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
793 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
793 else:
794 else:
794 pass
795 pass
795
796
796 def lookup(self, key):
797 def lookup(self, key):
797 return self[key].node()
798 return self[key].node()
798
799
799 def lookupbranch(self, key, remote=None):
800 def lookupbranch(self, key, remote=None):
800 repo = remote or self
801 repo = remote or self
801 if key in repo.branchmap():
802 if key in repo.branchmap():
802 return key
803 return key
803
804
804 repo = (remote and remote.local()) and remote or self
805 repo = (remote and remote.local()) and remote or self
805 return repo[key].branch()
806 return repo[key].branch()
806
807
807 def known(self, nodes):
808 def known(self, nodes):
808 nm = self.changelog.nodemap
809 nm = self.changelog.nodemap
809 pc = self._phasecache
810 pc = self._phasecache
810 result = []
811 result = []
811 for n in nodes:
812 for n in nodes:
812 r = nm.get(n)
813 r = nm.get(n)
813 resp = not (r is None or pc.phase(self, r) >= phases.secret)
814 resp = not (r is None or pc.phase(self, r) >= phases.secret)
814 result.append(resp)
815 result.append(resp)
815 return result
816 return result
816
817
817 def local(self):
818 def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
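
    # Illustrative sketch (not part of the upstream module): filectx() is the
    # usual way to read a single file at a given revision. The path and
    # revision below are assumptions for illustration:
    #
    #   fctx = repo.filectx('README', changeid='tip')
    #   data = fctx.data()         # file contents at that revision
    #   node = fctx.filenode()     # the filelog node backing it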

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
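
    # Illustrative sketch (not part of the upstream module): extensions such
    # as eol register named data filters and reference them from the
    # [encode]/[decode] config sections that _loadfilter() reads. The filter
    # name 'tolf:' and the hgrc snippet below are made up for illustration:
    #
    #   def tolf(s, cmd, ui=None, repo=None, filename=None, **kwargs):
    #       return s.replace('\r\n', '\n')  # normalize line endings on read
    #   repo.adddatafilter('tolf:', tolf)
    #
    #   # hgrc:
    #   # [encode]
    #   # **.txt = tolf: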

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write(None)

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be invoked explicitly here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # prevent in-memory changes from being written out at
                # the end of outer wlock scope or so
                repo.dirstate.invalidate()

                # discard all changes (including ones already written
                # out) in this transaction
                repo.vfs.rename('journal.dirstate', 'dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
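
    # Illustrative sketch (not part of the upstream module): callers pair
    # transaction() with close()/release() so that an exception aborts the
    # journal; commitctx() below follows exactly this shape:
    #
    #   lock = repo.lock()           # a transaction requires the store lock
    #   try:
    #       tr = repo.transaction('my-operation')  # desc chosen by caller
    #       try:
    #           ...                  # write to the store
    #           tr.close()           # commit the transaction
    #       finally:
    #           tr.release()         # roll back unless close() succeeded
    #   finally:
    #       lock.release()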

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly reread the dirstate (i.e. to restore it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
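
    # Illustrative sketch (not part of the upstream module): commit() below
    # uses _afterlock() this way, deferring the 'commit' hook until all locks
    # are released so the hook observes fully flushed repository state:
    #
    #   def callback():
    #       repo.hook('commit', node=hex(node))  # arguments abbreviated
    #   repo._afterlock(callback)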

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
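
    # Illustrative sketch (not part of the upstream module): when both locks
    # are needed, acquire in the documented order (wlock, then lock) and
    # release in reverse; rollback() above and commit() below follow this:
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       ...                      # modify .hg and the store
    #   finally:
    #       release(lock, wlock)     # release() skips None entries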

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
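
    # Illustrative note (not part of the upstream module): when a rename is
    # recorded above, the new filelog revision carries metadata of roughly
    # the shape {'copy': 'oldname', 'copyrev': '<40-hex filelog node>'} and
    # fparent1 is set to nullid, telling readers to consult the copy source.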

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            unresolved, driverresolved = False, False
            ms = mergemod.mergestate.read(self)
            for f in status.modified:
                if f in ms:
                    if ms[f] == 'u':
                        unresolved = True
                    elif ms[f] == 'd':
                        driverresolved = True

            if unresolved:
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if driverresolved or ms.mdstate() != 's':
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            lock = self.lock()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit);
            # the temporary commit may already have been stripped before
            # the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
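
    # Illustrative sketch (not part of the upstream module): a minimal caller;
    # the message and user below are made up, and None means nothing changed:
    #
    #   node = repo.commit(text='fix frobnication',
    #                      user='someone <someone@example.com>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')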

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent already has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1718
1719
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

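    # Sketch of the intended calling sequence (hypothetical caller, for
    # illustration only): strip-like operations bracket the destructive
    # step with these two notifications while holding the lock, e.g.
    #
    #   lock = repo.lock()
    #   try:
    #       repo.destroying()      # flush in-memory state (e.g. phasecache)
    #       ...                    # actually truncate the revlogs
    #       repo.destroyed()       # rebuild caches for the new history
    #   finally:
    #       lock.release()
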
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # Update the 'served' branch cache to help read-only server
        # processes. Thanks to branchcache collaboration this is done from
        # the nearest filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

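    # Note (illustration, not from the original source): 'served' is one of
    # the repoview filter names. repo.filtered('served') yields a view of
    # the repository without hidden (obsolete) and secret changesets --
    # i.e. what a server would expose to clients -- so its branch cache is
    # the one worth precomputing here.
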
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

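    # Usage sketch (hypothetical, for illustration): walk the working
    # directory (node=None) or a changeset with a matcher, e.g.
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo.walk(m):                 # files in the working dir
    #       repo.ui.write(f + '\n')
    #   for f in repo.walk(m, node='tip'):     # files in the tip changeset
    #       repo.ui.write(f + '\n')
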
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

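    # Usage sketch (hypothetical, for illustration): compare the working
    # directory against its first parent, or two changesets against each
    # other; the result is a status object whose fields are lists of
    # filenames:
    #
    #   st = repo.status()                   # '.' vs the working directory
    #   st.modified, st.added, st.removed    # always populated
    #   st = repo.status('.^', '.', ignored=True, clean=True, unknown=True)
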
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

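    # Usage sketch (hypothetical, for illustration):
    #
    #   repo.branchheads('default')              # open heads, newest first
    #   repo.branchheads('stable', closed=True)  # also include closed heads
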
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

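    # Note (illustration, not from the original source): each tuple appended
    # above is (starting node, end of the linear run, p1 of the end, p2 of
    # the end). From each requested node the loop follows first parents down
    # a linear stretch of history and stops at the first merge
    # (p[1] != nullid) or at a root (p[0] == nullid). This backs the legacy
    # 'branches' wire protocol command.
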
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

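    # Note (illustration, not from the original source): for each
    # (top, bottom) pair the loop walks first parents from top toward bottom
    # and samples the nodes whose distance from top is a power of two
    # (1, 2, 4, 8, ...). The legacy 'between' wire protocol command uses
    # these exponentially spaced samples for binary-search style discovery
    # of the common subset of history.
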
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

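    # Hypothetical extension sketch (illustration only -- all names here are
    # invented): an extension could veto pushes by overriding checkpush() in
    # its reposetup:
    #
    #   def reposetup(ui, repo):
    #       class vetoedrepo(repo.__class__):
    #           def checkpush(self, pushop):
    #               super(vetoedrepo, self).checkpush(pushop)
    #               if pushop.force:
    #                   raise error.Abort('forced pushes are disabled here')
    #       repo.__class__ = vetoedrepo
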
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote,
        outgoing)" functions, which are called before pushing changesets.
        """
        return util.hooks()

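    # Usage sketch (hypothetical, for illustration): util.hooks exposes an
    # add(source, hook) method, so an extension might register a check with
    #
    #   def checkoutgoing(repo, remote, outgoing):
    #       ...   # e.g. abort if outgoing.missing looks wrong
    #   repo.prepushoutgoinghooks.add('myextension', checkoutgoing)
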
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

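    # Usage sketch (hypothetical, for illustration -- 'newhexnode' is an
    # invented variable): the pushkey system is what bookmarks and phases
    # ride on over the wire. For the 'bookmarks' namespace, keys are
    # bookmark names and values are hex nodes:
    #
    #   marks = repo.listkeys('bookmarks')        # {name: hexnode}
    #   repo.pushkey('bookmarks', 'featureX',
    #                marks.get('featureX', ''), newhexnode)
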
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

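    # Note (illustration, not from the original source): self.vfs is rooted
    # at the repository's .hg directory, so the message lands in
    # .hg/last-message.txt, and the return value is that path made relative
    # for display -- e.g. in the hint printed after a failed commit.
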
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

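# Note (illustration, not from the original source): the closure returned
# here captures only the (vfs, src, dest) rename triples -- deliberately not
# the repo object -- so the transaction, which keeps the callback alive,
# holds no reference cycle back to the repository. A hypothetical call:
#
#   a = aftertrans([(vfs, 'journal', 'undo')])
#   a()   # renames 'journal' to 'undo' via the given vfs
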
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

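# Examples (illustration): undoname('journal.dirstate') == 'undo.dirstate',
# and undoname('/repo/.hg/journal') == '/repo/.hg/undo'; any name that does
# not start with 'journal' trips the assertion above.
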
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
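
# Note (illustration, not from the original source): instance() and
# islocal() form the small module-level interface that mercurial/hg.py
# expects from every repository scheme. hg.repository() and hg.peer()
# dispatch on the path's scheme and call the chosen module's
# instance(ui, path, create) to construct the repo, while islocal()
# reports that plain filesystem paths are local.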