localrepo: improve docstring for revset methods...
Gregory Szorc, r27071:dfb31eeb (branch: default)
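The change replaces terse docstrings on localrepository.revs() and set() with fuller ones. A minimal usage sketch (illustrative, not part of the diff), assuming an already-opened repository object; the revset expressions and revision number are made up for the example:

# Usage sketch (illustrative, not part of this commit). Assumes an
# already-opened repository object, e.g.:
#   from mercurial import hg, ui
#   repo = hg.repository(ui.ui(), '/path/to/repo')
# revs() returns a smartset of integer revisions; %d/%s are escaped
# through revset.formatspec before the expression is parsed.
draftrevs = repo.revs('draft() and ancestors(%d)', 42)
for rev in draftrevs:
    pass  # rev is an integer revision number
# set() is the changectx-yielding convenience wrapper around revs().
for ctx in repo.set('heads(branch(%s))', 'default'):
    pass  # ctx is a changectx for one matching revision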
@@ -1,1930 +1,1938 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, wdirrev, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset, cmdutil
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect, random
import branchmap, pathutil
import namespaces
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return ['revlogv1']

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                self.requirements.update(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    self.requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        self.requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            self.requirements.add('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if scmutil.gdinitconfig(self.ui):
                    self.requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    self.requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    self.requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _activebookmark(self):
        return bookmarks.readactive(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
-        '''Return a list of revisions matching the given revset'''
+        '''Find revisions matching a revset.
+
+        The revset is specified as a string ``expr`` that may contain
+        %-formatting to escape certain types. See ``revset.formatspec``.
+
+        Return a revset.abstractsmartset, which is a list-like interface
+        that contains integer revisions.
+        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
-        '''
-        Yield a context for each matching revision, after doing arg
-        replacement via revset.formatspec
+        '''Find revisions matching a revset and emit changectx instances.
+
+        This is a convenience wrapper around ``revs()`` that iterates the
+        result and is a generator of changectx instances.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
532
540
533 def url(self):
541 def url(self):
534 return 'file:' + self.root
542 return 'file:' + self.root
535
543
536 def hook(self, name, throw=False, **args):
544 def hook(self, name, throw=False, **args):
537 """Call a hook, passing this repo instance.
545 """Call a hook, passing this repo instance.
538
546
539 This a convenience method to aid invoking hooks. Extensions likely
547 This a convenience method to aid invoking hooks. Extensions likely
540 won't call this unless they have registered a custom hook or are
548 won't call this unless they have registered a custom hook or are
541 replacing code that is expected to call a hook.
549 replacing code that is expected to call a hook.
542 """
550 """
543 return hook.hook(self.ui, self, name, throw, **args)
551 return hook.hook(self.ui, self, name, throw, **args)
544
552
545 @unfilteredmethod
553 @unfilteredmethod
546 def _tag(self, names, node, message, local, user, date, extra=None,
554 def _tag(self, names, node, message, local, user, date, extra=None,
547 editor=False):
555 editor=False):
548 if isinstance(names, str):
556 if isinstance(names, str):
549 names = (names,)
557 names = (names,)
550
558
551 branches = self.branchmap()
559 branches = self.branchmap()
552 for name in names:
560 for name in names:
553 self.hook('pretag', throw=True, node=hex(node), tag=name,
561 self.hook('pretag', throw=True, node=hex(node), tag=name,
554 local=local)
562 local=local)
555 if name in branches:
563 if name in branches:
556 self.ui.warn(_("warning: tag %s conflicts with existing"
564 self.ui.warn(_("warning: tag %s conflicts with existing"
557 " branch name\n") % name)
565 " branch name\n") % name)
558
566
559 def writetags(fp, names, munge, prevtags):
567 def writetags(fp, names, munge, prevtags):
560 fp.seek(0, 2)
568 fp.seek(0, 2)
561 if prevtags and prevtags[-1] != '\n':
569 if prevtags and prevtags[-1] != '\n':
562 fp.write('\n')
570 fp.write('\n')
563 for name in names:
571 for name in names:
564 if munge:
572 if munge:
565 m = munge(name)
573 m = munge(name)
566 else:
574 else:
567 m = name
575 m = name
568
576
569 if (self._tagscache.tagtypes and
577 if (self._tagscache.tagtypes and
570 name in self._tagscache.tagtypes):
578 name in self._tagscache.tagtypes):
571 old = self.tags().get(name, nullid)
579 old = self.tags().get(name, nullid)
572 fp.write('%s %s\n' % (hex(old), m))
580 fp.write('%s %s\n' % (hex(old), m))
573 fp.write('%s %s\n' % (hex(node), m))
581 fp.write('%s %s\n' % (hex(node), m))
574 fp.close()
582 fp.close()
575
583
576 prevtags = ''
584 prevtags = ''
577 if local:
585 if local:
578 try:
586 try:
579 fp = self.vfs('localtags', 'r+')
587 fp = self.vfs('localtags', 'r+')
580 except IOError:
588 except IOError:
581 fp = self.vfs('localtags', 'a')
589 fp = self.vfs('localtags', 'a')
582 else:
590 else:
583 prevtags = fp.read()
591 prevtags = fp.read()
584
592
585 # local tags are stored in the current charset
593 # local tags are stored in the current charset
586 writetags(fp, names, None, prevtags)
594 writetags(fp, names, None, prevtags)
587 for name in names:
595 for name in names:
588 self.hook('tag', node=hex(node), tag=name, local=local)
596 self.hook('tag', node=hex(node), tag=name, local=local)
589 return
597 return
590
598
591 try:
599 try:
592 fp = self.wfile('.hgtags', 'rb+')
600 fp = self.wfile('.hgtags', 'rb+')
593 except IOError as e:
601 except IOError as e:
594 if e.errno != errno.ENOENT:
602 if e.errno != errno.ENOENT:
595 raise
603 raise
596 fp = self.wfile('.hgtags', 'ab')
604 fp = self.wfile('.hgtags', 'ab')
597 else:
605 else:
598 prevtags = fp.read()
606 prevtags = fp.read()
599
607
600 # committed tags are stored in UTF-8
608 # committed tags are stored in UTF-8
601 writetags(fp, names, encoding.fromlocal, prevtags)
609 writetags(fp, names, encoding.fromlocal, prevtags)
602
610
603 fp.close()
611 fp.close()
604
612
605 self.invalidatecaches()
613 self.invalidatecaches()
606
614
607 if '.hgtags' not in self.dirstate:
615 if '.hgtags' not in self.dirstate:
608 self[None].add(['.hgtags'])
616 self[None].add(['.hgtags'])
609
617
610 m = matchmod.exact(self.root, '', ['.hgtags'])
618 m = matchmod.exact(self.root, '', ['.hgtags'])
611 tagnode = self.commit(message, user, date, extra=extra, match=m,
619 tagnode = self.commit(message, user, date, extra=extra, match=m,
612 editor=editor)
620 editor=editor)
613
621
614 for name in names:
622 for name in names:
615 self.hook('tag', node=hex(node), tag=name, local=local)
623 self.hook('tag', node=hex(node), tag=name, local=local)
616
624
617 return tagnode
625 return tagnode
618
626
619 def tag(self, names, node, message, local, user, date, editor=False):
627 def tag(self, names, node, message, local, user, date, editor=False):
620 '''tag a revision with one or more symbolic names.
628 '''tag a revision with one or more symbolic names.
621
629
622 names is a list of strings or, when adding a single tag, names may be a
630 names is a list of strings or, when adding a single tag, names may be a
623 string.
631 string.
624
632
625 if local is True, the tags are stored in a per-repository file.
633 if local is True, the tags are stored in a per-repository file.
626 otherwise, they are stored in the .hgtags file, and a new
634 otherwise, they are stored in the .hgtags file, and a new
627 changeset is committed with the change.
635 changeset is committed with the change.
628
636
629 keyword arguments:
637 keyword arguments:
630
638
631 local: whether to store tags in non-version-controlled file
639 local: whether to store tags in non-version-controlled file
632 (default False)
640 (default False)
633
641
634 message: commit message to use if committing
642 message: commit message to use if committing
635
643
636 user: name of user to use if committing
644 user: name of user to use if committing
637
645
638 date: date tuple to use if committing'''
646 date: date tuple to use if committing'''
639
647
640 if not local:
648 if not local:
641 m = matchmod.exact(self.root, '', ['.hgtags'])
649 m = matchmod.exact(self.root, '', ['.hgtags'])
642 if any(self.status(match=m, unknown=True, ignored=True)):
650 if any(self.status(match=m, unknown=True, ignored=True)):
643 raise error.Abort(_('working copy of .hgtags is changed'),
651 raise error.Abort(_('working copy of .hgtags is changed'),
644 hint=_('please commit .hgtags manually'))
652 hint=_('please commit .hgtags manually'))
645
653
646 self.tags() # instantiate the cache
654 self.tags() # instantiate the cache
647 self._tag(names, node, message, local, user, date, editor=editor)
655 self._tag(names, node, message, local, user, date, editor=editor)
648
656
649 @filteredpropertycache
657 @filteredpropertycache
650 def _tagscache(self):
658 def _tagscache(self):
651 '''Returns a tagscache object that contains various tags related
659 '''Returns a tagscache object that contains various tags related
652 caches.'''
660 caches.'''
653
661
654 # This simplifies its cache management by having one decorated
662 # This simplifies its cache management by having one decorated
655 # function (this one) and the rest simply fetch things from it.
663 # function (this one) and the rest simply fetch things from it.
656 class tagscache(object):
664 class tagscache(object):
657 def __init__(self):
665 def __init__(self):
658 # These two define the set of tags for this repository. tags
666 # These two define the set of tags for this repository. tags
659 # maps tag name to node; tagtypes maps tag name to 'global' or
667 # maps tag name to node; tagtypes maps tag name to 'global' or
660 # 'local'. (Global tags are defined by .hgtags across all
668 # 'local'. (Global tags are defined by .hgtags across all
661 # heads, and local tags are defined in .hg/localtags.)
669 # heads, and local tags are defined in .hg/localtags.)
662 # They constitute the in-memory cache of tags.
670 # They constitute the in-memory cache of tags.
663 self.tags = self.tagtypes = None
671 self.tags = self.tagtypes = None
664
672
665 self.nodetagscache = self.tagslist = None
673 self.nodetagscache = self.tagslist = None
666
674
667 cache = tagscache()
675 cache = tagscache()
668 cache.tags, cache.tagtypes = self._findtags()
676 cache.tags, cache.tagtypes = self._findtags()
669
677
670 return cache
678 return cache
671
679
672 def tags(self):
680 def tags(self):
673 '''return a mapping of tag to node'''
681 '''return a mapping of tag to node'''
674 t = {}
682 t = {}
675 if self.changelog.filteredrevs:
683 if self.changelog.filteredrevs:
676 tags, tt = self._findtags()
684 tags, tt = self._findtags()
677 else:
685 else:
678 tags = self._tagscache.tags
686 tags = self._tagscache.tags
679 for k, v in tags.iteritems():
687 for k, v in tags.iteritems():
680 try:
688 try:
681 # ignore tags to unknown nodes
689 # ignore tags to unknown nodes
682 self.changelog.rev(v)
690 self.changelog.rev(v)
683 t[k] = v
691 t[k] = v
684 except (error.LookupError, ValueError):
692 except (error.LookupError, ValueError):
685 pass
693 pass
686 return t
694 return t
687
695
688 def _findtags(self):
696 def _findtags(self):
689 '''Do the hard work of finding tags. Return a pair of dicts
697 '''Do the hard work of finding tags. Return a pair of dicts
690 (tags, tagtypes) where tags maps tag name to node, and tagtypes
698 (tags, tagtypes) where tags maps tag name to node, and tagtypes
691 maps tag name to a string like \'global\' or \'local\'.
699 maps tag name to a string like \'global\' or \'local\'.
692 Subclasses or extensions are free to add their own tags, but
700 Subclasses or extensions are free to add their own tags, but
693 should be aware that the returned dicts will be retained for the
701 should be aware that the returned dicts will be retained for the
694 duration of the localrepo object.'''
702 duration of the localrepo object.'''
695
703
696 # XXX what tagtype should subclasses/extensions use? Currently
704 # XXX what tagtype should subclasses/extensions use? Currently
697 # mq and bookmarks add tags, but do not set the tagtype at all.
705 # mq and bookmarks add tags, but do not set the tagtype at all.
698 # Should each extension invent its own tag type? Should there
706 # Should each extension invent its own tag type? Should there
699 # be one tagtype for all such "virtual" tags? Or is the status
707 # be one tagtype for all such "virtual" tags? Or is the status
700 # quo fine?
708 # quo fine?
701
709
702 alltags = {} # map tag name to (node, hist)
710 alltags = {} # map tag name to (node, hist)
703 tagtypes = {}
711 tagtypes = {}
704
712
705 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
713 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
706 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
714 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
707
715
708 # Build the return dicts. Have to re-encode tag names because
716 # Build the return dicts. Have to re-encode tag names because
709 # the tags module always uses UTF-8 (in order not to lose info
717 # the tags module always uses UTF-8 (in order not to lose info
710 # writing to the cache), but the rest of Mercurial wants them in
718 # writing to the cache), but the rest of Mercurial wants them in
711 # local encoding.
719 # local encoding.
712 tags = {}
720 tags = {}
713 for (name, (node, hist)) in alltags.iteritems():
721 for (name, (node, hist)) in alltags.iteritems():
714 if node != nullid:
722 if node != nullid:
715 tags[encoding.tolocal(name)] = node
723 tags[encoding.tolocal(name)] = node
716 tags['tip'] = self.changelog.tip()
724 tags['tip'] = self.changelog.tip()
717 tagtypes = dict([(encoding.tolocal(name), value)
725 tagtypes = dict([(encoding.tolocal(name), value)
718 for (name, value) in tagtypes.iteritems()])
726 for (name, value) in tagtypes.iteritems()])
719 return (tags, tagtypes)
727 return (tags, tagtypes)
720
728
721 def tagtype(self, tagname):
729 def tagtype(self, tagname):
722 '''
730 '''
723 return the type of the given tag. result can be:
731 return the type of the given tag. result can be:
724
732
725 'local' : a local tag
733 'local' : a local tag
726 'global' : a global tag
734 'global' : a global tag
727 None : tag does not exist
735 None : tag does not exist
728 '''
736 '''
729
737
730 return self._tagscache.tagtypes.get(tagname)
738 return self._tagscache.tagtypes.get(tagname)
731
739
732 def tagslist(self):
740 def tagslist(self):
733 '''return a list of tags ordered by revision'''
741 '''return a list of tags ordered by revision'''
734 if not self._tagscache.tagslist:
742 if not self._tagscache.tagslist:
735 l = []
743 l = []
736 for t, n in self.tags().iteritems():
744 for t, n in self.tags().iteritems():
737 l.append((self.changelog.rev(n), t, n))
745 l.append((self.changelog.rev(n), t, n))
738 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
746 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
739
747
740 return self._tagscache.tagslist
748 return self._tagscache.tagslist
741
749
742 def nodetags(self, node):
750 def nodetags(self, node):
743 '''return the tags associated with a node'''
751 '''return the tags associated with a node'''
744 if not self._tagscache.nodetagscache:
752 if not self._tagscache.nodetagscache:
745 nodetagscache = {}
753 nodetagscache = {}
746 for t, n in self._tagscache.tags.iteritems():
754 for t, n in self._tagscache.tags.iteritems():
747 nodetagscache.setdefault(n, []).append(t)
755 nodetagscache.setdefault(n, []).append(t)
748 for tags in nodetagscache.itervalues():
756 for tags in nodetagscache.itervalues():
749 tags.sort()
757 tags.sort()
750 self._tagscache.nodetagscache = nodetagscache
758 self._tagscache.nodetagscache = nodetagscache
751 return self._tagscache.nodetagscache.get(node, [])
759 return self._tagscache.nodetagscache.get(node, [])
752
760
753 def nodebookmarks(self, node):
761 def nodebookmarks(self, node):
754 marks = []
762 marks = []
755 for bookmark, n in self._bookmarks.iteritems():
763 for bookmark, n in self._bookmarks.iteritems():
756 if n == node:
764 if n == node:
757 marks.append(bookmark)
765 marks.append(bookmark)
758 return sorted(marks)
766 return sorted(marks)
759
767
760 def branchmap(self):
768 def branchmap(self):
761 '''returns a dictionary {branch: [branchheads]} with branchheads
769 '''returns a dictionary {branch: [branchheads]} with branchheads
762 ordered by increasing revision number'''
770 ordered by increasing revision number'''
763 branchmap.updatecache(self)
771 branchmap.updatecache(self)
764 return self._branchcaches[self.filtername]
772 return self._branchcaches[self.filtername]
765
773
766 @unfilteredmethod
774 @unfilteredmethod
767 def revbranchcache(self):
775 def revbranchcache(self):
768 if not self._revbranchcache:
776 if not self._revbranchcache:
769 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
777 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
770 return self._revbranchcache
778 return self._revbranchcache
771
779
772 def branchtip(self, branch, ignoremissing=False):
780 def branchtip(self, branch, ignoremissing=False):
773 '''return the tip node for a given branch
781 '''return the tip node for a given branch
774
782
775 If ignoremissing is True, then this method will not raise an error.
783 If ignoremissing is True, then this method will not raise an error.
776 This is helpful for callers that only expect None for a missing branch
784 This is helpful for callers that only expect None for a missing branch
777 (e.g. namespace).
785 (e.g. namespace).
778
786
779 '''
787 '''
780 try:
788 try:
781 return self.branchmap().branchtip(branch)
789 return self.branchmap().branchtip(branch)
782 except KeyError:
790 except KeyError:
783 if not ignoremissing:
791 if not ignoremissing:
784 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
792 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
785 else:
793 else:
786 pass
794 pass
787
795
788 def lookup(self, key):
796 def lookup(self, key):
789 return self[key].node()
797 return self[key].node()
790
798
791 def lookupbranch(self, key, remote=None):
799 def lookupbranch(self, key, remote=None):
792 repo = remote or self
800 repo = remote or self
793 if key in repo.branchmap():
801 if key in repo.branchmap():
794 return key
802 return key
795
803
796 repo = (remote and remote.local()) and remote or self
804 repo = (remote and remote.local()) and remote or self
797 return repo[key].branch()
805 return repo[key].branch()
798
806
799 def known(self, nodes):
807 def known(self, nodes):
800 nm = self.changelog.nodemap
808 nm = self.changelog.nodemap
801 pc = self._phasecache
809 pc = self._phasecache
802 result = []
810 result = []
803 for n in nodes:
811 for n in nodes:
804 r = nm.get(n)
812 r = nm.get(n)
805 resp = not (r is None or pc.phase(self, r) >= phases.secret)
813 resp = not (r is None or pc.phase(self, r) >= phases.secret)
806 result.append(resp)
814 result.append(resp)
807 return result
815 return result
808
816
809 def local(self):
817 def local(self):
810 return self
818 return self
811
819
812 def publishing(self):
820 def publishing(self):
813 # it's safe (and desirable) to trust the publish flag unconditionally
821 # it's safe (and desirable) to trust the publish flag unconditionally
814 # so that we don't finalize changes shared between users via ssh or nfs
822 # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

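    # Illustrative usage sketch (not part of the original source): reading a
    # file as of a given revision via the context APIs above, e.g.
    #
    #     ctx = repo['tip']                                   # changectx
    #     data = repo.filectx('README', changeid='tip').data()
    #
    # 'README' and 'tip' are placeholder values for this example.
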
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

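    # Illustrative configuration sketch (not part of the original source):
    # _loadfilter() reads pattern/command pairs from hgrc sections such as
    # [encode] and [decode]; a command of '!' disables a pattern. For
    # example, an hgrc might contain
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #     [decode]
    #     *.gz = !
    #
    # The patterns and commands shown here are placeholders for whatever is
    # configured locally.
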
    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

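    # Illustrative note (not part of the original source): the ``flags``
    # argument to wwrite() uses manifest flag characters, e.g.
    #
    #     repo.wwrite('script.sh', data, 'x')    # executable file
    #     repo.wwrite('link', 'target', 'l')     # symlink to 'target'
    #     repo.wwrite('plain.txt', data, '')     # regular file
    #
    # The filenames here are placeholders.
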
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write(None)

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # prevent in-memory changes from being written out at
                # the end of outer wlock scope or so
                repo.dirstate.invalidate()

                # discard all changes (including ones already written
                # out) in this transaction
                repo.vfs.rename('journal.dirstate', 'dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

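    # Illustrative usage sketch (not part of the original source): callers
    # typically bracket store writes in a transaction like
    #
    #     tr = repo.transaction('my-operation')   # placeholder description
    #     try:
    #         ...                # write store data
    #         tr.close()         # success: run finalizers and close
    #     finally:
    #         tr.release()       # roll back if close() was never reached
    #
    # A nested call simply returns tr.nest() on the running transaction, as
    # shown at the top of transaction() above.
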
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

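    # Illustrative note (an assumption, not part of the original source):
    # journal.desc stores the pre-transaction changelog length and the
    # transaction description, so after a commit in a 42-revision repo it
    # would contain, e.g.
    #
    #     42
    #     commit
    #
    # After a successful transaction, the renames passed to aftertrans()
    # above turn each journal.* file into the matching undo.* name (via
    # undoname), which is what _rollback() later reads.
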
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored
            # dirstate
            dsguard.close()

            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

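    # Illustrative usage sketch (not part of the original source): callers
    # that need both locks follow the ordering documented above, as
    # rollback() and commit() in this file do, e.g.
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()   # working-directory lock first
    #         lock = repo.lock()     # then the store lock
    #         ...                    # modify store and working directory
    #     finally:
    #         release(lock, wlock)   # release in reverse order
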
    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            unresolved, driverresolved = False, False
            ms = mergemod.mergestate.read(self)
            for f in status.modified:
                if f in ms:
                    if ms[f] == 'u':
                        unresolved = True
                    elif ms[f] == 'd':
                        driverresolved = True

            if unresolved:
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if driverresolved or ms.mdstate() != 's':
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            lock = self.lock()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit);
            # the temporary commit may get stripped before the hook is run
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

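    # Illustrative usage sketch (not part of the original source): a caller
    # creating a revision from the working directory might do
    #
    #     node = repo.commit(text='fix the frobnicator',
    #                        user='Jane Doe <jane@example.com>')
    #     if node is None:
    #         ...  # nothing to commit (empty commit not allowed)
    #
    # The message and user shown here are placeholder values.
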
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in the proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

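    # Illustrative use of walk() (an editor's sketch, not part of the
    # original module): enumerate tracked Python files in the working
    # directory. 'repo' stands for any localrepository instance; node=None
    # selects the working copy context.
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo.walk(m):
    #       repo.ui.write("%s\n" % f)
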
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

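    # Illustrative use of status() (an editor's sketch, not part of the
    # original module): compare the working directory against its first
    # parent; the result behaves like a tuple but also exposes named fields.
    #
    #   st = repo.status(unknown=True)
    #   for f in st.modified:
    #       repo.ui.write("M %s\n" % f)
    #   for f in st.unknown:
    #       repo.ui.write("? %s\n" % f)
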
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

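    # Illustrative use of branchheads() (an editor's sketch, not part of
    # the original module): list the open heads of the 'default' branch,
    # newest first, as short hashes.
    #
    #   for h in repo.branchheads('default'):
    #       repo.ui.write("%s\n" % short(h))
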
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

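    # Editor's note on the sampling above (a sketch, not part of the
    # original module): the doubling of 'f' records first-parent ancestors
    # of 'top' at distances 1, 2, 4, 8, ... until 'bottom' is reached,
    # giving the old-style discovery protocol a logarithmic set of probe
    # points. For example, on a linear history with top at rev 10 and
    # bottom at the null revision:
    #
    #   [l] = repo.between([(repo.changelog.node(10), nullid)])
    #   # l holds the nodes of revs 9, 8, 6 and 2 (distances 1, 2, 4, 8)
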
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote,
        outgoing)" functions, which are called before pushing changesets.
        """
        return util.hooks()

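    # Illustrative registration (an editor's sketch, not part of the
    # original module): an extension can add a callable that vetoes a push;
    # 'checkheads' and 'myext' are hypothetical names.
    #
    #   def checkheads(repo, remote, outgoing):
    #       if len(outgoing.missingheads) > 1:
    #           raise error.Abort('push would create multiple remote heads')
    #   repo.prepushoutgoinghooks.add('myext', checkheads)
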
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
        # internal config: ui.quietbookmarkmove
        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            pullop = exchange.pull(self, remote, heads,
                                   streamclonerequested=stream)
            return pullop.cgresult
        finally:
            self.ui.restoreconfig(quiet)

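    # Illustrative use of clone() (an editor's sketch, not part of the
    # original module): populate an empty local repository from a peer.
    # The 'hg' module is not imported here and the URL is a placeholder.
    #
    #   remote = hg.peer(repo.ui, {}, 'http://example.com/repo')
    #   repo.clone(remote, stream=True)
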
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

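    # Illustrative use of pushkey() (an editor's sketch, not part of the
    # original module): creating a bookmark through the pushkey protocol.
    # Values in the 'bookmarks' namespace are hex node ids, and '' means
    # "no previous value".
    #
    #   node = repo['tip'].hex()
    #   repo.pushkey('bookmarks', 'my-bookmark', '', node)
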
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

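    # Illustrative use of listkeys() (an editor's sketch, not part of the
    # original module): enumerate bookmark names and their hex nodes.
    #
    #   for name, hexnode in sorted(repo.listkeys('bookmarks').items()):
    #       repo.ui.write("%s %s\n" % (name, hexnode))
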
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

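    # Illustrative use of savecommitmessage() (an editor's sketch, not part
    # of the original module): stash a draft message so it survives an
    # aborted commit; the return value is the repo-relative path.
    #
    #   msgpath = repo.savecommitmessage('WIP: draft message\n')
    #   repo.ui.status("message saved to %s\n" % msgpath)
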
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

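# Illustrative use of aftertrans() (an editor's sketch, not part of the
# original module): the returned closure captures only the (vfs, src, dest)
# triples, not the repository itself, so a transaction finalizer holds no
# reference cycle back to the repo. The triple below is an assumed example.
#
#   finalize = aftertrans([(repo.svfs, 'journal', 'undo')])
#   finalize()  # renames .hg/store/journal to .hg/store/undo
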
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

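# Illustrative behaviour of undoname() (an editor's sketch, not part of the
# original module): only the first 'journal' component is rewritten.
#
#   undoname('.hg/store/journal')             -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots')  -> '.hg/store/undo.phaseroots'
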
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True