localrepo: isolate requirements determination from side effects...
Gregory Szorc
r28163:5d3495e3 default
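
The change below follows a two-phase pattern: first determine the new repository's requirements purely from configuration, then perform the filesystem side effects (creating the working directory, `.hg`, the store directory, and the dummy changelog) that those requirements imply. As a rough sketch of that separation, assuming only the `ui.configbool` and vfs APIs visible in the diff (the helper names here are illustrative, not part of this commit):

    # Sketch only: the two-phase structure __init__ moves toward.
    # determinerequirements/createrepolayout are hypothetical helpers,
    # not code from this commit.

    def determinerequirements(ui):
        """Phase 1 (pure): derive the requirements set from config alone."""
        requirements = set(['revlogv1'])
        if ui.configbool('format', 'usestore', True):
            requirements.add('store')
            if ui.configbool('format', 'usefncache', True):
                requirements.add('fncache')
                if ui.configbool('format', 'dotencode', True):
                    requirements.add('dotencode')
        return requirements

    def createrepolayout(vfs, requirements):
        """Phase 2 (impure): perform the side effects requirements imply."""
        if 'store' in requirements:
            vfs.mkdir('store')

Keeping phase 1 free of side effects means the requirements computation can be inspected or reused without touching the filesystem, which is what the commit summary describes.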
mercurial/localrepo.py
@@ -1,1966 +1,1971 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import inspect
import os
import random
import time
import urllib
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    context,
    dirstate,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    util,
)

release = lockmod.release
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return ['revlogv1']

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
-                if not self.wvfs.exists():
-                    self.wvfs.makedirs()
-                self.vfs.makedir(notindexed=True)
                requirements = set(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
-                    self.vfs.mkdir("store")
                    requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.add('dotencode')
-                    # create an invalid changelog
-                    self.vfs.append(
-                        "00changelog.i",
-                        '\0\0\0\2' # represents revlogv2
-                        ' dummy changelog to prevent using the old repo layout'
-                    )
+
                if scmutil.gdinitconfig(self.ui):
                    requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    requirements.add("manifestv2")

                self.requirements = requirements
+
+                if not self.wvfs.exists():
+                    self.wvfs.makedirs()
+                self.vfs.makedir(notindexed=True)
+
+                if 'store' in requirements:
+                    self.vfs.mkdir("store")
+
+                # create an invalid changelog
+                self.vfs.append(
+                    "00changelog.i",
+                    '\0\0\0\2' # represents revlogv2
+                    ' dummy changelog to prevent using the old repo layout'
+                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
312 elif create:
317 elif create:
313 raise error.RepoError(_("repository %s already exists") % path)
318 raise error.RepoError(_("repository %s already exists") % path)
314 else:
319 else:
315 try:
320 try:
316 self.requirements = scmutil.readrequires(
321 self.requirements = scmutil.readrequires(
317 self.vfs, self.supported)
322 self.vfs, self.supported)
318 except IOError as inst:
323 except IOError as inst:
319 if inst.errno != errno.ENOENT:
324 if inst.errno != errno.ENOENT:
320 raise
325 raise
321
326
322 self.sharedpath = self.path
327 self.sharedpath = self.path
323 try:
328 try:
324 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
329 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
325 realpath=True)
330 realpath=True)
326 s = vfs.base
331 s = vfs.base
327 if not vfs.exists():
332 if not vfs.exists():
328 raise error.RepoError(
333 raise error.RepoError(
329 _('.hg/sharedpath points to nonexistent directory %s') % s)
334 _('.hg/sharedpath points to nonexistent directory %s') % s)
330 self.sharedpath = s
335 self.sharedpath = s
331 except IOError as inst:
336 except IOError as inst:
332 if inst.errno != errno.ENOENT:
337 if inst.errno != errno.ENOENT:
333 raise
338 raise
334
339
335 self.store = store.store(
340 self.store = store.store(
336 self.requirements, self.sharedpath, scmutil.vfs)
341 self.requirements, self.sharedpath, scmutil.vfs)
337 self.spath = self.store.path
342 self.spath = self.store.path
338 self.svfs = self.store.vfs
343 self.svfs = self.store.vfs
339 self.sjoin = self.store.join
344 self.sjoin = self.store.join
340 self.vfs.createmode = self.store.createmode
345 self.vfs.createmode = self.store.createmode
341 self._applyopenerreqs()
346 self._applyopenerreqs()
342 if create:
347 if create:
343 self._writerequirements()
348 self._writerequirements()
344
349
345 self._dirstatevalidatewarned = False
350 self._dirstatevalidatewarned = False
346
351
347 self._branchcaches = {}
352 self._branchcaches = {}
348 self._revbranchcache = None
353 self._revbranchcache = None
349 self.filterpats = {}
354 self.filterpats = {}
350 self._datafilters = {}
355 self._datafilters = {}
351 self._transref = self._lockref = self._wlockref = None
356 self._transref = self._lockref = self._wlockref = None
352
357
353 # A cache for various files under .hg/ that tracks file changes,
358 # A cache for various files under .hg/ that tracks file changes,
354 # (used by the filecache decorator)
359 # (used by the filecache decorator)
355 #
360 #
356 # Maps a property name to its util.filecacheentry
361 # Maps a property name to its util.filecacheentry
357 self._filecache = {}
362 self._filecache = {}
358
363
359 # hold sets of revision to be filtered
364 # hold sets of revision to be filtered
360 # should be cleared when something might have changed the filter value:
365 # should be cleared when something might have changed the filter value:
361 # - new changesets,
366 # - new changesets,
362 # - phase change,
367 # - phase change,
363 # - new obsolescence marker,
368 # - new obsolescence marker,
364 # - working directory parent change,
369 # - working directory parent change,
365 # - bookmark changes
370 # - bookmark changes
366 self.filteredrevcache = {}
371 self.filteredrevcache = {}
367
372
368 # generic mapping between names and nodes
373 # generic mapping between names and nodes
369 self.names = namespaces.namespaces()
374 self.names = namespaces.namespaces()
370
375
371 def close(self):
376 def close(self):
372 self._writecaches()
377 self._writecaches()
373
378
374 def _writecaches(self):
379 def _writecaches(self):
375 if self._revbranchcache:
380 if self._revbranchcache:
376 self._revbranchcache.write()
381 self._revbranchcache.write()
377
382
378 def _restrictcapabilities(self, caps):
383 def _restrictcapabilities(self, caps):
379 if self.ui.configbool('experimental', 'bundle2-advertise', True):
384 if self.ui.configbool('experimental', 'bundle2-advertise', True):
380 caps = set(caps)
385 caps = set(caps)
381 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
386 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
382 caps.add('bundle2=' + urllib.quote(capsblob))
387 caps.add('bundle2=' + urllib.quote(capsblob))
383 return caps
388 return caps
384
389
385 def _applyopenerreqs(self):
390 def _applyopenerreqs(self):
386 self.svfs.options = dict((r, 1) for r in self.requirements
391 self.svfs.options = dict((r, 1) for r in self.requirements
387 if r in self.openerreqs)
392 if r in self.openerreqs)
388 # experimental config: format.chunkcachesize
393 # experimental config: format.chunkcachesize
389 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
394 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
390 if chunkcachesize is not None:
395 if chunkcachesize is not None:
391 self.svfs.options['chunkcachesize'] = chunkcachesize
396 self.svfs.options['chunkcachesize'] = chunkcachesize
392 # experimental config: format.maxchainlen
397 # experimental config: format.maxchainlen
393 maxchainlen = self.ui.configint('format', 'maxchainlen')
398 maxchainlen = self.ui.configint('format', 'maxchainlen')
394 if maxchainlen is not None:
399 if maxchainlen is not None:
395 self.svfs.options['maxchainlen'] = maxchainlen
400 self.svfs.options['maxchainlen'] = maxchainlen
396 # experimental config: format.manifestcachesize
401 # experimental config: format.manifestcachesize
397 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
402 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
398 if manifestcachesize is not None:
403 if manifestcachesize is not None:
399 self.svfs.options['manifestcachesize'] = manifestcachesize
404 self.svfs.options['manifestcachesize'] = manifestcachesize
400 # experimental config: format.aggressivemergedeltas
405 # experimental config: format.aggressivemergedeltas
401 aggressivemergedeltas = self.ui.configbool('format',
406 aggressivemergedeltas = self.ui.configbool('format',
402 'aggressivemergedeltas', False)
407 'aggressivemergedeltas', False)
403 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
408 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
404 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
409 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
405
410
406 def _writerequirements(self):
411 def _writerequirements(self):
407 scmutil.writerequires(self.vfs, self.requirements)
412 scmutil.writerequires(self.vfs, self.requirements)
408
413
409 def _checknested(self, path):
414 def _checknested(self, path):
410 """Determine if path is a legal nested repository."""
415 """Determine if path is a legal nested repository."""
411 if not path.startswith(self.root):
416 if not path.startswith(self.root):
412 return False
417 return False
413 subpath = path[len(self.root) + 1:]
418 subpath = path[len(self.root) + 1:]
414 normsubpath = util.pconvert(subpath)
419 normsubpath = util.pconvert(subpath)
415
420
416 # XXX: Checking against the current working copy is wrong in
421 # XXX: Checking against the current working copy is wrong in
417 # the sense that it can reject things like
422 # the sense that it can reject things like
418 #
423 #
419 # $ hg cat -r 10 sub/x.txt
424 # $ hg cat -r 10 sub/x.txt
420 #
425 #
421 # if sub/ is no longer a subrepository in the working copy
426 # if sub/ is no longer a subrepository in the working copy
422 # parent revision.
427 # parent revision.
423 #
428 #
424 # However, it can of course also allow things that would have
429 # However, it can of course also allow things that would have
425 # been rejected before, such as the above cat command if sub/
430 # been rejected before, such as the above cat command if sub/
426 # is a subrepository now, but was a normal directory before.
431 # is a subrepository now, but was a normal directory before.
427 # The old path auditor would have rejected by mistake since it
432 # The old path auditor would have rejected by mistake since it
428 # panics when it sees sub/.hg/.
433 # panics when it sees sub/.hg/.
429 #
434 #
430 # All in all, checking against the working copy seems sensible
435 # All in all, checking against the working copy seems sensible
431 # since we want to prevent access to nested repositories on
436 # since we want to prevent access to nested repositories on
432 # the filesystem *now*.
437 # the filesystem *now*.
433 ctx = self[None]
438 ctx = self[None]
434 parts = util.splitpath(subpath)
439 parts = util.splitpath(subpath)
435 while parts:
440 while parts:
436 prefix = '/'.join(parts)
441 prefix = '/'.join(parts)
437 if prefix in ctx.substate:
442 if prefix in ctx.substate:
438 if prefix == normsubpath:
443 if prefix == normsubpath:
439 return True
444 return True
440 else:
445 else:
441 sub = ctx.sub(prefix)
446 sub = ctx.sub(prefix)
442 return sub.checknested(subpath[len(prefix) + 1:])
447 return sub.checknested(subpath[len(prefix) + 1:])
443 else:
448 else:
444 parts.pop()
449 parts.pop()
445 return False
450 return False
446
451
447 def peer(self):
452 def peer(self):
448 return localpeer(self) # not cached to avoid reference cycle
453 return localpeer(self) # not cached to avoid reference cycle
449
454
450 def unfiltered(self):
455 def unfiltered(self):
451 """Return unfiltered version of the repository
456 """Return unfiltered version of the repository
452
457
453 Intended to be overwritten by filtered repo."""
458 Intended to be overwritten by filtered repo."""
454 return self
459 return self
455
460
456 def filtered(self, name):
461 def filtered(self, name):
457 """Return a filtered version of a repository"""
462 """Return a filtered version of a repository"""
458 # build a new class with the mixin and the current class
463 # build a new class with the mixin and the current class
459 # (possibly subclass of the repo)
464 # (possibly subclass of the repo)
460 class proxycls(repoview.repoview, self.unfiltered().__class__):
465 class proxycls(repoview.repoview, self.unfiltered().__class__):
461 pass
466 pass
462 return proxycls(self, name)
467 return proxycls(self, name)
463
468
464 @repofilecache('bookmarks', 'bookmarks.current')
469 @repofilecache('bookmarks', 'bookmarks.current')
465 def _bookmarks(self):
470 def _bookmarks(self):
466 return bookmarks.bmstore(self)
471 return bookmarks.bmstore(self)
467
472
468 @property
473 @property
469 def _activebookmark(self):
474 def _activebookmark(self):
470 return self._bookmarks.active
475 return self._bookmarks.active
471
476
472 def bookmarkheads(self, bookmark):
477 def bookmarkheads(self, bookmark):
473 name = bookmark.split('@', 1)[0]
478 name = bookmark.split('@', 1)[0]
474 heads = []
479 heads = []
475 for mark, n in self._bookmarks.iteritems():
480 for mark, n in self._bookmarks.iteritems():
476 if mark.split('@', 1)[0] == name:
481 if mark.split('@', 1)[0] == name:
477 heads.append(n)
482 heads.append(n)
478 return heads
483 return heads
479
484
480 # _phaserevs and _phasesets depend on changelog. what we need is to
485 # _phaserevs and _phasesets depend on changelog. what we need is to
481 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
486 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
482 # can't be easily expressed in filecache mechanism.
487 # can't be easily expressed in filecache mechanism.
483 @storecache('phaseroots', '00changelog.i')
488 @storecache('phaseroots', '00changelog.i')
484 def _phasecache(self):
489 def _phasecache(self):
485 return phases.phasecache(self, self._phasedefaults)
490 return phases.phasecache(self, self._phasedefaults)
486
491
487 @storecache('obsstore')
492 @storecache('obsstore')
488 def obsstore(self):
493 def obsstore(self):
489 # read default format for new obsstore.
494 # read default format for new obsstore.
490 # developer config: format.obsstore-version
495 # developer config: format.obsstore-version
491 defaultformat = self.ui.configint('format', 'obsstore-version', None)
496 defaultformat = self.ui.configint('format', 'obsstore-version', None)
492 # rely on obsstore class default when possible.
497 # rely on obsstore class default when possible.
493 kwargs = {}
498 kwargs = {}
494 if defaultformat is not None:
499 if defaultformat is not None:
495 kwargs['defaultformat'] = defaultformat
500 kwargs['defaultformat'] = defaultformat
496 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
501 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
497 store = obsolete.obsstore(self.svfs, readonly=readonly,
502 store = obsolete.obsstore(self.svfs, readonly=readonly,
498 **kwargs)
503 **kwargs)
499 if store and readonly:
504 if store and readonly:
500 self.ui.warn(
505 self.ui.warn(
501 _('obsolete feature not enabled but %i markers found!\n')
506 _('obsolete feature not enabled but %i markers found!\n')
502 % len(list(store)))
507 % len(list(store)))
503 return store
508 return store
504
509
505 @storecache('00changelog.i')
510 @storecache('00changelog.i')
506 def changelog(self):
511 def changelog(self):
507 c = changelog.changelog(self.svfs)
512 c = changelog.changelog(self.svfs)
508 if 'HG_PENDING' in os.environ:
513 if 'HG_PENDING' in os.environ:
509 p = os.environ['HG_PENDING']
514 p = os.environ['HG_PENDING']
510 if p.startswith(self.root):
515 if p.startswith(self.root):
511 c.readpending('00changelog.i.a')
516 c.readpending('00changelog.i.a')
512 return c
517 return c
513
518
514 @storecache('00manifest.i')
519 @storecache('00manifest.i')
515 def manifest(self):
520 def manifest(self):
516 return manifest.manifest(self.svfs)
521 return manifest.manifest(self.svfs)
517
522
518 def dirlog(self, dir):
523 def dirlog(self, dir):
519 return self.manifest.dirlog(dir)
524 return self.manifest.dirlog(dir)
520
525
521 @repofilecache('dirstate')
526 @repofilecache('dirstate')
522 def dirstate(self):
527 def dirstate(self):
523 return dirstate.dirstate(self.vfs, self.ui, self.root,
528 return dirstate.dirstate(self.vfs, self.ui, self.root,
524 self._dirstatevalidate)
529 self._dirstatevalidate)
525
530
526 def _dirstatevalidate(self, node):
531 def _dirstatevalidate(self, node):
527 try:
532 try:
528 self.changelog.rev(node)
533 self.changelog.rev(node)
529 return node
534 return node
530 except error.LookupError:
535 except error.LookupError:
531 if not self._dirstatevalidatewarned:
536 if not self._dirstatevalidatewarned:
532 self._dirstatevalidatewarned = True
537 self._dirstatevalidatewarned = True
533 self.ui.warn(_("warning: ignoring unknown"
538 self.ui.warn(_("warning: ignoring unknown"
534 " working parent %s!\n") % short(node))
539 " working parent %s!\n") % short(node))
535 return nullid
540 return nullid
536
541
537 def __getitem__(self, changeid):
542 def __getitem__(self, changeid):
538 if changeid is None or changeid == wdirrev:
543 if changeid is None or changeid == wdirrev:
539 return context.workingctx(self)
544 return context.workingctx(self)
540 if isinstance(changeid, slice):
545 if isinstance(changeid, slice):
541 return [context.changectx(self, i)
546 return [context.changectx(self, i)
542 for i in xrange(*changeid.indices(len(self)))
547 for i in xrange(*changeid.indices(len(self)))
543 if i not in self.changelog.filteredrevs]
548 if i not in self.changelog.filteredrevs]
544 return context.changectx(self, changeid)
549 return context.changectx(self, changeid)
545
550
546 def __contains__(self, changeid):
551 def __contains__(self, changeid):
547 try:
552 try:
548 self[changeid]
553 self[changeid]
549 return True
554 return True
550 except error.RepoLookupError:
555 except error.RepoLookupError:
551 return False
556 return False
552
557
553 def __nonzero__(self):
558 def __nonzero__(self):
554 return True
559 return True
555
560
556 def __len__(self):
561 def __len__(self):
557 return len(self.changelog)
562 return len(self.changelog)
558
563
559 def __iter__(self):
564 def __iter__(self):
560 return iter(self.changelog)
565 return iter(self.changelog)
561
566
562 def revs(self, expr, *args):
567 def revs(self, expr, *args):
563 '''Find revisions matching a revset.
568 '''Find revisions matching a revset.
564
569
565 The revset is specified as a string ``expr`` that may contain
570 The revset is specified as a string ``expr`` that may contain
566 %-formatting to escape certain types. See ``revset.formatspec``.
571 %-formatting to escape certain types. See ``revset.formatspec``.
567
572
568 Return a revset.abstractsmartset, which is a list-like interface
573 Return a revset.abstractsmartset, which is a list-like interface
569 that contains integer revisions.
574 that contains integer revisions.
570 '''
575 '''
571 expr = revset.formatspec(expr, *args)
576 expr = revset.formatspec(expr, *args)
572 m = revset.match(None, expr)
577 m = revset.match(None, expr)
573 return m(self)
578 return m(self)
574
579
575 def set(self, expr, *args):
580 def set(self, expr, *args):
576 '''Find revisions matching a revset and emit changectx instances.
581 '''Find revisions matching a revset and emit changectx instances.
577
582
578 This is a convenience wrapper around ``revs()`` that iterates the
583 This is a convenience wrapper around ``revs()`` that iterates the
579 result and is a generator of changectx instances.
584 result and is a generator of changectx instances.
580 '''
585 '''
581 for r in self.revs(expr, *args):
586 for r in self.revs(expr, *args):
582 yield self[r]
587 yield self[r]
583
588
584 def url(self):
589 def url(self):
585 return 'file:' + self.root
590 return 'file:' + self.root
586
591
587 def hook(self, name, throw=False, **args):
592 def hook(self, name, throw=False, **args):
588 """Call a hook, passing this repo instance.
593 """Call a hook, passing this repo instance.
589
594
590 This a convenience method to aid invoking hooks. Extensions likely
595 This a convenience method to aid invoking hooks. Extensions likely
591 won't call this unless they have registered a custom hook or are
596 won't call this unless they have registered a custom hook or are
592 replacing code that is expected to call a hook.
597 replacing code that is expected to call a hook.
593 """
598 """
594 return hook.hook(self.ui, self, name, throw, **args)
599 return hook.hook(self.ui, self, name, throw, **args)
595
600
596 @unfilteredmethod
601 @unfilteredmethod
597 def _tag(self, names, node, message, local, user, date, extra=None,
602 def _tag(self, names, node, message, local, user, date, extra=None,
598 editor=False):
603 editor=False):
599 if isinstance(names, str):
604 if isinstance(names, str):
600 names = (names,)
605 names = (names,)
601
606
602 branches = self.branchmap()
607 branches = self.branchmap()
603 for name in names:
608 for name in names:
604 self.hook('pretag', throw=True, node=hex(node), tag=name,
609 self.hook('pretag', throw=True, node=hex(node), tag=name,
605 local=local)
610 local=local)
606 if name in branches:
611 if name in branches:
607 self.ui.warn(_("warning: tag %s conflicts with existing"
612 self.ui.warn(_("warning: tag %s conflicts with existing"
608 " branch name\n") % name)
613 " branch name\n") % name)
609
614
610 def writetags(fp, names, munge, prevtags):
615 def writetags(fp, names, munge, prevtags):
611 fp.seek(0, 2)
616 fp.seek(0, 2)
612 if prevtags and prevtags[-1] != '\n':
617 if prevtags and prevtags[-1] != '\n':
613 fp.write('\n')
618 fp.write('\n')
614 for name in names:
619 for name in names:
615 if munge:
620 if munge:
616 m = munge(name)
621 m = munge(name)
617 else:
622 else:
618 m = name
623 m = name
619
624
620 if (self._tagscache.tagtypes and
625 if (self._tagscache.tagtypes and
621 name in self._tagscache.tagtypes):
626 name in self._tagscache.tagtypes):
622 old = self.tags().get(name, nullid)
627 old = self.tags().get(name, nullid)
623 fp.write('%s %s\n' % (hex(old), m))
628 fp.write('%s %s\n' % (hex(old), m))
624 fp.write('%s %s\n' % (hex(node), m))
629 fp.write('%s %s\n' % (hex(node), m))
625 fp.close()
630 fp.close()
626
631
627 prevtags = ''
632 prevtags = ''
628 if local:
633 if local:
629 try:
634 try:
630 fp = self.vfs('localtags', 'r+')
635 fp = self.vfs('localtags', 'r+')
631 except IOError:
636 except IOError:
632 fp = self.vfs('localtags', 'a')
637 fp = self.vfs('localtags', 'a')
633 else:
638 else:
634 prevtags = fp.read()
639 prevtags = fp.read()
635
640
636 # local tags are stored in the current charset
641 # local tags are stored in the current charset
637 writetags(fp, names, None, prevtags)
642 writetags(fp, names, None, prevtags)
638 for name in names:
643 for name in names:
639 self.hook('tag', node=hex(node), tag=name, local=local)
644 self.hook('tag', node=hex(node), tag=name, local=local)
640 return
645 return
641
646
642 try:
647 try:
643 fp = self.wfile('.hgtags', 'rb+')
648 fp = self.wfile('.hgtags', 'rb+')
644 except IOError as e:
649 except IOError as e:
645 if e.errno != errno.ENOENT:
650 if e.errno != errno.ENOENT:
646 raise
651 raise
647 fp = self.wfile('.hgtags', 'ab')
652 fp = self.wfile('.hgtags', 'ab')
648 else:
653 else:
649 prevtags = fp.read()
654 prevtags = fp.read()
650
655
651 # committed tags are stored in UTF-8
656 # committed tags are stored in UTF-8
652 writetags(fp, names, encoding.fromlocal, prevtags)
657 writetags(fp, names, encoding.fromlocal, prevtags)
653
658
654 fp.close()
659 fp.close()
655
660
656 self.invalidatecaches()
661 self.invalidatecaches()
657
662
658 if '.hgtags' not in self.dirstate:
663 if '.hgtags' not in self.dirstate:
659 self[None].add(['.hgtags'])
664 self[None].add(['.hgtags'])
660
665
661 m = matchmod.exact(self.root, '', ['.hgtags'])
666 m = matchmod.exact(self.root, '', ['.hgtags'])
662 tagnode = self.commit(message, user, date, extra=extra, match=m,
667 tagnode = self.commit(message, user, date, extra=extra, match=m,
663 editor=editor)
668 editor=editor)
664
669
665 for name in names:
670 for name in names:
666 self.hook('tag', node=hex(node), tag=name, local=local)
671 self.hook('tag', node=hex(node), tag=name, local=local)
667
672
668 return tagnode
673 return tagnode
669
674
670 def tag(self, names, node, message, local, user, date, editor=False):
675 def tag(self, names, node, message, local, user, date, editor=False):
671 '''tag a revision with one or more symbolic names.
676 '''tag a revision with one or more symbolic names.
672
677
673 names is a list of strings or, when adding a single tag, names may be a
678 names is a list of strings or, when adding a single tag, names may be a
674 string.
679 string.
675
680
676 if local is True, the tags are stored in a per-repository file.
681 if local is True, the tags are stored in a per-repository file.
677 otherwise, they are stored in the .hgtags file, and a new
682 otherwise, they are stored in the .hgtags file, and a new
678 changeset is committed with the change.
683 changeset is committed with the change.
679
684
680 keyword arguments:
685 keyword arguments:
681
686
682 local: whether to store tags in non-version-controlled file
687 local: whether to store tags in non-version-controlled file
683 (default False)
688 (default False)
684
689
685 message: commit message to use if committing
690 message: commit message to use if committing
686
691
687 user: name of user to use if committing
692 user: name of user to use if committing
688
693
689 date: date tuple to use if committing'''
694 date: date tuple to use if committing'''
690
695
691 if not local:
696 if not local:
692 m = matchmod.exact(self.root, '', ['.hgtags'])
697 m = matchmod.exact(self.root, '', ['.hgtags'])
693 if any(self.status(match=m, unknown=True, ignored=True)):
698 if any(self.status(match=m, unknown=True, ignored=True)):
694 raise error.Abort(_('working copy of .hgtags is changed'),
699 raise error.Abort(_('working copy of .hgtags is changed'),
695 hint=_('please commit .hgtags manually'))
700 hint=_('please commit .hgtags manually'))
696
701
697 self.tags() # instantiate the cache
702 self.tags() # instantiate the cache
698 self._tag(names, node, message, local, user, date, editor=editor)
703 self._tag(names, node, message, local, user, date, editor=editor)
699
704
700 @filteredpropertycache
705 @filteredpropertycache
701 def _tagscache(self):
706 def _tagscache(self):
702 '''Returns a tagscache object that contains various tags related
707 '''Returns a tagscache object that contains various tags related
703 caches.'''
708 caches.'''
704
709
705 # This simplifies its cache management by having one decorated
710 # This simplifies its cache management by having one decorated
706 # function (this one) and the rest simply fetch things from it.
711 # function (this one) and the rest simply fetch things from it.
707 class tagscache(object):
712 class tagscache(object):
708 def __init__(self):
713 def __init__(self):
709 # These two define the set of tags for this repository. tags
714 # These two define the set of tags for this repository. tags
710 # maps tag name to node; tagtypes maps tag name to 'global' or
715 # maps tag name to node; tagtypes maps tag name to 'global' or
711 # 'local'. (Global tags are defined by .hgtags across all
716 # 'local'. (Global tags are defined by .hgtags across all
712 # heads, and local tags are defined in .hg/localtags.)
717 # heads, and local tags are defined in .hg/localtags.)
713 # They constitute the in-memory cache of tags.
718 # They constitute the in-memory cache of tags.
714 self.tags = self.tagtypes = None
719 self.tags = self.tagtypes = None
715
720
716 self.nodetagscache = self.tagslist = None
721 self.nodetagscache = self.tagslist = None
717
722
718 cache = tagscache()
723 cache = tagscache()
719 cache.tags, cache.tagtypes = self._findtags()
724 cache.tags, cache.tagtypes = self._findtags()
720
725
721 return cache
726 return cache
722
727
723 def tags(self):
728 def tags(self):
724 '''return a mapping of tag to node'''
729 '''return a mapping of tag to node'''
725 t = {}
730 t = {}
726 if self.changelog.filteredrevs:
731 if self.changelog.filteredrevs:
727 tags, tt = self._findtags()
732 tags, tt = self._findtags()
728 else:
733 else:
729 tags = self._tagscache.tags
734 tags = self._tagscache.tags
730 for k, v in tags.iteritems():
735 for k, v in tags.iteritems():
731 try:
736 try:
732 # ignore tags to unknown nodes
737 # ignore tags to unknown nodes
733 self.changelog.rev(v)
738 self.changelog.rev(v)
734 t[k] = v
739 t[k] = v
735 except (error.LookupError, ValueError):
740 except (error.LookupError, ValueError):
736 pass
741 pass
737 return t
742 return t
738
743
739 def _findtags(self):
744 def _findtags(self):
740 '''Do the hard work of finding tags. Return a pair of dicts
745 '''Do the hard work of finding tags. Return a pair of dicts
741 (tags, tagtypes) where tags maps tag name to node, and tagtypes
746 (tags, tagtypes) where tags maps tag name to node, and tagtypes
742 maps tag name to a string like \'global\' or \'local\'.
747 maps tag name to a string like \'global\' or \'local\'.
743 Subclasses or extensions are free to add their own tags, but
748 Subclasses or extensions are free to add their own tags, but
744 should be aware that the returned dicts will be retained for the
749 should be aware that the returned dicts will be retained for the
745 duration of the localrepo object.'''
750 duration of the localrepo object.'''
746
751
747 # XXX what tagtype should subclasses/extensions use? Currently
752 # XXX what tagtype should subclasses/extensions use? Currently
748 # mq and bookmarks add tags, but do not set the tagtype at all.
753 # mq and bookmarks add tags, but do not set the tagtype at all.
749 # Should each extension invent its own tag type? Should there
754 # Should each extension invent its own tag type? Should there
750 # be one tagtype for all such "virtual" tags? Or is the status
755 # be one tagtype for all such "virtual" tags? Or is the status
751 # quo fine?
756 # quo fine?
752
757
753 alltags = {} # map tag name to (node, hist)
758 alltags = {} # map tag name to (node, hist)
754 tagtypes = {}
759 tagtypes = {}
755
760
756 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
761 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
757 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
762 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
758
763
759 # Build the return dicts. Have to re-encode tag names because
764 # Build the return dicts. Have to re-encode tag names because
760 # the tags module always uses UTF-8 (in order not to lose info
765 # the tags module always uses UTF-8 (in order not to lose info
761 # writing to the cache), but the rest of Mercurial wants them in
766 # writing to the cache), but the rest of Mercurial wants them in
762 # local encoding.
767 # local encoding.
763 tags = {}
768 tags = {}
764 for (name, (node, hist)) in alltags.iteritems():
769 for (name, (node, hist)) in alltags.iteritems():
765 if node != nullid:
770 if node != nullid:
766 tags[encoding.tolocal(name)] = node
771 tags[encoding.tolocal(name)] = node
767 tags['tip'] = self.changelog.tip()
772 tags['tip'] = self.changelog.tip()
768 tagtypes = dict([(encoding.tolocal(name), value)
773 tagtypes = dict([(encoding.tolocal(name), value)
769 for (name, value) in tagtypes.iteritems()])
774 for (name, value) in tagtypes.iteritems()])
770 return (tags, tagtypes)
775 return (tags, tagtypes)
771
776
772 def tagtype(self, tagname):
777 def tagtype(self, tagname):
773 '''
778 '''
774 return the type of the given tag. result can be:
779 return the type of the given tag. result can be:
775
780
776 'local' : a local tag
781 'local' : a local tag
777 'global' : a global tag
782 'global' : a global tag
778 None : tag does not exist
783 None : tag does not exist
779 '''
784 '''
780
785
781 return self._tagscache.tagtypes.get(tagname)
786 return self._tagscache.tagtypes.get(tagname)
782
787
783 def tagslist(self):
788 def tagslist(self):
784 '''return a list of tags ordered by revision'''
789 '''return a list of tags ordered by revision'''
785 if not self._tagscache.tagslist:
790 if not self._tagscache.tagslist:
786 l = []
791 l = []
787 for t, n in self.tags().iteritems():
792 for t, n in self.tags().iteritems():
788 l.append((self.changelog.rev(n), t, n))
793 l.append((self.changelog.rev(n), t, n))
789 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
794 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
790
795
791 return self._tagscache.tagslist
796 return self._tagscache.tagslist
792
797
793 def nodetags(self, node):
798 def nodetags(self, node):
794 '''return the tags associated with a node'''
799 '''return the tags associated with a node'''
795 if not self._tagscache.nodetagscache:
800 if not self._tagscache.nodetagscache:
796 nodetagscache = {}
801 nodetagscache = {}
797 for t, n in self._tagscache.tags.iteritems():
802 for t, n in self._tagscache.tags.iteritems():
798 nodetagscache.setdefault(n, []).append(t)
803 nodetagscache.setdefault(n, []).append(t)
799 for tags in nodetagscache.itervalues():
804 for tags in nodetagscache.itervalues():
800 tags.sort()
805 tags.sort()
801 self._tagscache.nodetagscache = nodetagscache
806 self._tagscache.nodetagscache = nodetagscache
802 return self._tagscache.nodetagscache.get(node, [])
807 return self._tagscache.nodetagscache.get(node, [])
803
808
804 def nodebookmarks(self, node):
809 def nodebookmarks(self, node):
805 """return the list of bookmarks pointing to the specified node"""
810 """return the list of bookmarks pointing to the specified node"""
806 marks = []
811 marks = []
807 for bookmark, n in self._bookmarks.iteritems():
812 for bookmark, n in self._bookmarks.iteritems():
808 if n == node:
813 if n == node:
809 marks.append(bookmark)
814 marks.append(bookmark)
810 return sorted(marks)
815 return sorted(marks)
811
816
812 def branchmap(self):
817 def branchmap(self):
813 '''returns a dictionary {branch: [branchheads]} with branchheads
818 '''returns a dictionary {branch: [branchheads]} with branchheads
814 ordered by increasing revision number'''
819 ordered by increasing revision number'''
815 branchmap.updatecache(self)
820 branchmap.updatecache(self)
816 return self._branchcaches[self.filtername]
821 return self._branchcaches[self.filtername]
817
822
818 @unfilteredmethod
823 @unfilteredmethod
819 def revbranchcache(self):
824 def revbranchcache(self):
820 if not self._revbranchcache:
825 if not self._revbranchcache:
821 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
826 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
822 return self._revbranchcache
827 return self._revbranchcache
823
828
824 def branchtip(self, branch, ignoremissing=False):
829 def branchtip(self, branch, ignoremissing=False):
825 '''return the tip node for a given branch
830 '''return the tip node for a given branch
826
831
827 If ignoremissing is True, then this method will not raise an error.
832 If ignoremissing is True, then this method will not raise an error.
828 This is helpful for callers that only expect None for a missing branch
833 This is helpful for callers that only expect None for a missing branch
829 (e.g. namespace).
834 (e.g. namespace).
830
835
831 '''
836 '''
832 try:
837 try:
833 return self.branchmap().branchtip(branch)
838 return self.branchmap().branchtip(branch)
834 except KeyError:
839 except KeyError:
835 if not ignoremissing:
840 if not ignoremissing:
836 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
841 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
837 else:
842 else:
838 pass
843 pass
839
844
840 def lookup(self, key):
845 def lookup(self, key):
841 return self[key].node()
846 return self[key].node()
842
847
843 def lookupbranch(self, key, remote=None):
848 def lookupbranch(self, key, remote=None):
844 repo = remote or self
849 repo = remote or self
845 if key in repo.branchmap():
850 if key in repo.branchmap():
846 return key
851 return key
847
852
848 repo = (remote and remote.local()) and remote or self
853 repo = (remote and remote.local()) and remote or self
849 return repo[key].branch()
854 return repo[key].branch()
850
855
851 def known(self, nodes):
856 def known(self, nodes):
852 cl = self.changelog
857 cl = self.changelog
853 nm = cl.nodemap
858 nm = cl.nodemap
854 filtered = cl.filteredrevs
859 filtered = cl.filteredrevs
855 result = []
860 result = []
856 for n in nodes:
861 for n in nodes:
857 r = nm.get(n)
862 r = nm.get(n)
858 resp = not (r is None or r in filtered)
863 resp = not (r is None or r in filtered)
859 result.append(resp)
864 result.append(resp)
860 return result
865 return result
861
866
862 def local(self):
867 def local(self):
863 return self
868 return self
864
869
865 def publishing(self):
870 def publishing(self):
866 # it's safe (and desirable) to trust the publish flag unconditionally
871 # it's safe (and desirable) to trust the publish flag unconditionally
867 # so that we don't finalize changes shared between users via ssh or nfs
872 # so that we don't finalize changes shared between users via ssh or nfs
868 return self.ui.configbool('phases', 'publish', True, untrusted=True)
873 return self.ui.configbool('phases', 'publish', True, untrusted=True)
869
874
870 def cancopy(self):
875 def cancopy(self):
871 # so statichttprepo's override of local() works
876 # so statichttprepo's override of local() works
872 if not self.local():
877 if not self.local():
873 return False
878 return False
874 if not self.publishing():
879 if not self.publishing():
875 return True
880 return True
876 # if publishing we can't copy if there is filtered content
881 # if publishing we can't copy if there is filtered content
877 return not self.filtered('visible').changelog.filteredrevs
882 return not self.filtered('visible').changelog.filteredrevs
878
883
879 def shared(self):
884 def shared(self):
880 '''the type of shared repository (None if not shared)'''
885 '''the type of shared repository (None if not shared)'''
881 if self.sharedpath != self.path:
886 if self.sharedpath != self.path:
882 return 'store'
887 return 'store'
883 return None
888 return None
884
889
885 def join(self, f, *insidef):
890 def join(self, f, *insidef):
886 return self.vfs.join(os.path.join(f, *insidef))
891 return self.vfs.join(os.path.join(f, *insidef))
887
892
888 def wjoin(self, f, *insidef):
893 def wjoin(self, f, *insidef):
889 return self.vfs.reljoin(self.root, f, *insidef)
894 return self.vfs.reljoin(self.root, f, *insidef)
890
895
891 def file(self, f):
896 def file(self, f):
892 if f[0] == '/':
897 if f[0] == '/':
893 f = f[1:]
898 f = f[1:]
894 return filelog.filelog(self.svfs, f)
899 return filelog.filelog(self.svfs, f)
895
900
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
        self.ui.deprecwarn(msg, '3.7')
        return self[changeid].parents()

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as
            # it requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
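    # Hypothetical sketch (the filter name and callable are invented): a
    # registered data filter is selected in _loadfilter when an
    # [encode]/[decode] command starts with the filter's name, and is
    # later invoked as fn(data, cmd, ui=..., repo=..., filename=...).
    #   def upperfilter(s, cmd, **kwargs):
    #       return s.upper()
    #   repo.adddatafilter('upper:', upperfilter)
    #   # hgrc:  [decode]
    #   #        *.txt = upper: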
    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)
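    # Illustrative calls (filenames invented): the flags string selects
    # the on-disk representation written above.
    #   repo.wwrite('notes.txt', 'hi\n', '')            # regular file
    #   repo.wwrite('run.sh', '#!/bin/sh\n', 'x')       # executable bit set
    #   repo.wwrite('alias', 'notes.txt', 'l')          # symlink to notes.txt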
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
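    # Sketch (mirrors the nesting check at the top of transaction()
    # below): a caller can join an in-progress transaction instead of
    # opening a competing one.
    #   tr = repo.currenttransaction()
    #   if tr is not None:
    #       tr = tr.nest()   # piggy-back on the running transaction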
    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write(None)

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when the
                # transaction closes if tr.addfilegenerator (via
                # dirstate.write or so) wasn't invoked while the
                # transaction was running
                repo.dirstate.write(None)
            else:
                # prevent in-memory changes from being written out at
                # the end of outer wlock scope or so
                repo.dirstate.invalidate()

                # discard all changes (including ones already written
                # out) in this transaction
                repo.vfs.rename('journal.dirstate', 'dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
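    # Sketch of the caller contract (the same pattern commitctx uses
    # later in this file): close on success, release unconditionally.
    #   tr = repo.transaction('my-operation')   # the name is illustrative
    #   try:
    #       ...  # mutate the store
    #       tr.close()
    #   finally:
    #       tr.release()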
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
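    # Editorial note (not in the original source): recover() is what
    # 'hg recover' runs, replaying the journal of an *interrupted*
    # transaction; rollback() below instead undoes the last *completed*
    # transaction via the undo files.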
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
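    # Sketch (the status message is invented): commit() below uses this
    # mechanism to fire the 'commit' hook only once every lock is dropped.
    #   repo._afterlock(lambda: repo.ui.status('all locks released\n'))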
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)
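    # Lock-ordering sketch, matching the docstrings above and the commit()
    # method below: take wlock before lock, and release in reverse order.
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       ...  # mutate working directory and store
    #   finally:
    #       release(lock, wlock)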
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may get stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
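    # Illustrative call (message and user are invented): returns the new
    # changeset node, or None when there is nothing to commit and empty
    # commits are not allowed.
    #   node = repo.commit(text='fix parser bug',
    #                      user='Jane Doe <jane@example.com>')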
1677 @unfilteredmethod
1682 @unfilteredmethod
1678 def commitctx(self, ctx, error=False):
1683 def commitctx(self, ctx, error=False):
1679 """Add a new revision to current repository.
1684 """Add a new revision to current repository.
1680 Revision information is passed via the context argument.
1685 Revision information is passed via the context argument.
1681 """
1686 """
1682
1687
1683 tr = None
1688 tr = None
1684 p1, p2 = ctx.p1(), ctx.p2()
1689 p1, p2 = ctx.p1(), ctx.p2()
1685 user = ctx.user()
1690 user = ctx.user()
1686
1691
1687 lock = self.lock()
1692 lock = self.lock()
1688 try:
1693 try:
1689 tr = self.transaction("commit")
1694 tr = self.transaction("commit")
1690 trp = weakref.proxy(tr)
1695 trp = weakref.proxy(tr)
1691
1696
1692 if ctx.files():
1697 if ctx.files():
1693 m1 = p1.manifest()
1698 m1 = p1.manifest()
1694 m2 = p2.manifest()
1699 m2 = p2.manifest()
1695 m = m1.copy()
1700 m = m1.copy()
                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        # 'error' is the commitctx(..., error=...) flag; the
                        # test parses as: error or (errcode and
                        # errcode != errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway.
                #
                # if the minimal phase was 0, we don't need to retract
                # anything.
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock) or to vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

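    # Illustrative sketch (an assumption about callers, not code from this
    # module): history-rewriting operations such as strip are expected to
    # bracket their work with the two notifications above:
    #
    #   lock = repo.lock()
    #   try:
    #       repo.destroying()      # flush in-memory state (e.g. phasecache)
    #       # ... truncate revlogs / remove nodes ...
    #       repo.destroyed()       # filter caches and refresh branch heads
    #   finally:
    #       lock.release()
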
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

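    # Illustrative sketch (assumes a 'repo' instance is in hand): walking
    # the working directory with an always-matcher; node=None selects the
    # working copy, while a node or rev selects that changeset.
    #
    #   m = matchmod.always(repo.root, '')
    #   for f in repo.walk(m):
    #       repo.ui.write(f + '\n')
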
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

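    # Illustrative sketch: the returned status object carries named lists;
    # with no arguments this compares the working directory against '.'.
    #
    #   st = repo.status(unknown=True)
    #   for f in st.modified:
    #       repo.ui.write('M %s\n' % f)
    #   for f in st.unknown:
    #       repo.ui.write('? %s\n' % f)
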
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

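    # Illustrative sketch: newest-first heads of the 'default' branch,
    # including closed heads.
    #
    #   for node in repo.branchheads('default', closed=True):
    #       repo.ui.write('%s\n' % short(node))
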
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # follow first parents until a merge or the root is reached,
            # then record (tip-of-segment, node, parent1, parent2)
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk first parents from top towards bottom, keeping only the
            # nodes at exponentially spaced steps (1, 2, 4, 8, ...)
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

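    # Illustrative sketch (standalone, hypothetical helper): between() keeps
    # the nodes at exponentially growing distances 1, 2, 4, 8, ... from
    # 'top' along the first-parent chain, the sampling used by the legacy
    # wire-protocol discovery. The same selection over a plain sequence:
    #
    #   def sample(chain):
    #       l, f = [], 1
    #       for i, n in enumerate(chain):
    #           if i == f:
    #               l.append(n)
    #               f *= 2
    #       return l
    #
    #   sample(range(20))  # -> [1, 2, 4, 8, 16]
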
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

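    # Illustrative sketch: bookmarks are exposed as a pushkey namespace, so
    # a bookmark move can go through this generic entry point (the bookmark
    # name is hypothetical; values are hex node strings, '' meaning
    # "absent"):
    #
    #   new = repo['tip'].hex()
    #   ok = repo.pushkey('bookmarks', 'my-bookmark', '', new)
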
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

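    # Illustrative sketch: enumerating namespaces, then the keys of one.
    #
    #   repo.listkeys('namespaces')  # e.g. {'bookmarks': '', 'phases': ''}
    #   repo.listkeys('bookmarks')   # {bookmark name: hex node, ...}
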
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

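    # Illustrative sketch (an assumption about callers): the saved message
    # lets a user recover their text after an aborted commit.
    #
    #   msgfn = repo.savecommitmessage(text)
    #   raise error.Abort(_("commit failed"),
    #                     hint=_("commit message saved in %s") % msgfn)
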
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

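# Illustrative sketch, derived directly from the code above:
#
#   undoname('.hg/store/journal')             # -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots')  # -> '.hg/store/undo.phaseroots'
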
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True