localrepo: don't reference transaction from hook closure (issue5043)...
Gregory Szorc
r27907:e219dbfd default
@@ -1,1958 +1,1964 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import inspect
import os
import random
import time
import urllib
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    context,
    dirstate,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    util,
)

release = lockmod.release
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on a repo is for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method so that it always runs on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return ['revlogv1']

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                self.requirements.update(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    self.requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        self.requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            self.requirements.add('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if scmutil.gdinitconfig(self.ui):
                    self.requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    self.requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    self.requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

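    # filtered() above returns a repoview proxy; for instance, the
    # 'visible' filter hides hidden (obsolete) changesets and 'served'
    # additionally hides secret ones. A minimal sketch, assuming `repo`
    # is an already-open localrepository (not defined here):
    #
    #   visible = repo.filtered('visible')
    #   assert visible.unfiltered() is repo.unfiltered()
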
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

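    # __getitem__ above accepts None (the working directory), integer
    # revisions, nodes, tags, and slices. A sketch, assuming an open
    # `repo` (not defined here):
    #
    #   ctx = repo['tip']      # changectx for the tip changeset
    #   wctx = repo[None]      # workingctx for the working directory
    #   ctxs = repo[0:5]       # list of changectxs, filtered revs skipped
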
    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Return a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

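    # revs() formats its arguments via revset.formatspec, so %s and
    # friends are revset escapes, not Python interpolation. A sketch
    # (open `repo` assumed, not defined here):
    #
    #   for rev in repo.revs('branch(%s) and not public()', 'default'):
    #       print rev
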
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

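    # The practical difference from revs(): set() yields changectx
    # objects instead of integer revisions. Sketch:
    #
    #   for ctx in repo.set('heads(all())'):
    #       print ctx.hex(), ctx.branch()
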
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

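    # A typical call site mirrors _tag() below; keyword arguments are
    # exposed to shell hooks as HG_* environment variables:
    #
    #   repo.hook('pretag', throw=True, node=hex(node), tag=name,
    #             local=local)
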
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

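    # A minimal usage sketch (assumes an open `repo` and an existing
    # changeset node `node`, neither defined here; date=None means "now"):
    #
    #   repo.tag(['v1.0'], node, 'Added tag v1.0', local=False,
    #            user='someone@example.com', date=None)
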
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

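    # Sketch of consuming the branch map (open `repo` assumed, not
    # defined here):
    #
    #   for branch, heads in repo.branchmap().iteritems():
    #       print branch, [hex(h) for h in heads]
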
    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

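    # known() backs the wire-protocol "known" command: for each candidate
    # node, report whether this repo has it and does not filter it, e.g.
    # (hypothetical nodes):
    #
    #   repo.known([node1, node2])  # -> [True, False]
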
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

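    # The corresponding hgrc knob (publishing is the default):
    #
    #   [phases]
    #   publish = False
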
868 def cancopy(self):
868 def cancopy(self):
869 # so statichttprepo's override of local() works
869 # so statichttprepo's override of local() works
870 if not self.local():
870 if not self.local():
871 return False
871 return False
872 if not self.publishing():
872 if not self.publishing():
873 return True
873 return True
874 # if publishing we can't copy if there is filtered content
874 # if publishing we can't copy if there is filtered content
875 return not self.filtered('visible').changelog.filteredrevs
875 return not self.filtered('visible').changelog.filteredrevs
876
876
877 def shared(self):
877 def shared(self):
878 '''the type of shared repository (None if not shared)'''
878 '''the type of shared repository (None if not shared)'''
879 if self.sharedpath != self.path:
879 if self.sharedpath != self.path:
880 return 'store'
880 return 'store'
881 return None
881 return None
882
882
883 def join(self, f, *insidef):
883 def join(self, f, *insidef):
884 return self.vfs.join(os.path.join(f, *insidef))
884 return self.vfs.join(os.path.join(f, *insidef))
885
885
886 def wjoin(self, f, *insidef):
886 def wjoin(self, f, *insidef):
887 return self.vfs.reljoin(self.root, f, *insidef)
887 return self.vfs.reljoin(self.root, f, *insidef)
888
888
889 def file(self, f):
889 def file(self, f):
890 if f[0] == '/':
890 if f[0] == '/':
891 f = f[1:]
891 f = f[1:]
892 return filelog.filelog(self.svfs, f)
892 return filelog.filelog(self.svfs, f)
893
893
894 def parents(self, changeid=None):
894 def parents(self, changeid=None):
895 '''get list of changectxs for parents of changeid'''
895 '''get list of changectxs for parents of changeid'''
896 msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
896 msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
897 self.ui.deprecwarn(msg, '3.7')
897 self.ui.deprecwarn(msg, '3.7')
898 return self[changeid].parents()
898 return self[changeid].parents()
899
899
900 def changectx(self, changeid):
900 def changectx(self, changeid):
901 return self[changeid]
901 return self[changeid]
902
902
903 def setparents(self, p1, p2=nullid):
903 def setparents(self, p1, p2=nullid):
904 self.dirstate.beginparentchange()
904 self.dirstate.beginparentchange()
905 copies = self.dirstate.setparents(p1, p2)
905 copies = self.dirstate.setparents(p1, p2)
906 pctx = self[p1]
906 pctx = self[p1]
907 if copies:
907 if copies:
908 # Adjust copy records, the dirstate cannot do it, it
908 # Adjust copy records, the dirstate cannot do it, it
909 # requires access to parents manifests. Preserve them
909 # requires access to parents manifests. Preserve them
910 # only for entries added to first parent.
910 # only for entries added to first parent.
911 for f in copies:
911 for f in copies:
912 if f not in pctx and copies[f] in pctx:
912 if f not in pctx and copies[f] in pctx:
913 self.dirstate.copy(copies[f], f)
913 self.dirstate.copy(copies[f], f)
914 if p2 == nullid:
914 if p2 == nullid:
915 for f, s in sorted(self.dirstate.copies().items()):
915 for f, s in sorted(self.dirstate.copies().items()):
916 if f not in pctx and s not in pctx:
916 if f not in pctx and s not in pctx:
917 self.dirstate.copy(None, f)
917 self.dirstate.copy(None, f)
918 self.dirstate.endparentchange()
918 self.dirstate.endparentchange()
919
919
920 def filectx(self, path, changeid=None, fileid=None):
920 def filectx(self, path, changeid=None, fileid=None):
921 """changeid can be a changeset revision, node, or tag.
921 """changeid can be a changeset revision, node, or tag.
922 fileid can be a file revision or node."""
922 fileid can be a file revision or node."""
923 return context.filectx(self, path, changeid, fileid)
923 return context.filectx(self, path, changeid, fileid)
924
924
925 def getcwd(self):
925 def getcwd(self):
926 return self.dirstate.getcwd()
926 return self.dirstate.getcwd()
927
927
928 def pathto(self, f, cwd=None):
928 def pathto(self, f, cwd=None):
929 return self.dirstate.pathto(f, cwd)
929 return self.dirstate.pathto(f, cwd)
930
930
931 def wfile(self, f, mode='r'):
931 def wfile(self, f, mode='r'):
932 return self.wvfs(f, mode)
932 return self.wvfs(f, mode)
933
933
934 def _link(self, f):
934 def _link(self, f):
935 return self.wvfs.islink(f)
935 return self.wvfs.islink(f)
936
936
937 def _loadfilter(self, filter):
937 def _loadfilter(self, filter):
938 if filter not in self.filterpats:
938 if filter not in self.filterpats:
939 l = []
939 l = []
940 for pat, cmd in self.ui.configitems(filter):
940 for pat, cmd in self.ui.configitems(filter):
941 if cmd == '!':
941 if cmd == '!':
942 continue
942 continue
943 mf = matchmod.match(self.root, '', [pat])
943 mf = matchmod.match(self.root, '', [pat])
944 fn = None
944 fn = None
945 params = cmd
945 params = cmd
946 for name, filterfn in self._datafilters.iteritems():
946 for name, filterfn in self._datafilters.iteritems():
947 if cmd.startswith(name):
947 if cmd.startswith(name):
948 fn = filterfn
948 fn = filterfn
949 params = cmd[len(name):].lstrip()
949 params = cmd[len(name):].lstrip()
950 break
950 break
951 if not fn:
951 if not fn:
952 fn = lambda s, c, **kwargs: util.filter(s, c)
952 fn = lambda s, c, **kwargs: util.filter(s, c)
953 # Wrap old filters not supporting keyword arguments
953 # Wrap old filters not supporting keyword arguments
954 if not inspect.getargspec(fn)[2]:
954 if not inspect.getargspec(fn)[2]:
955 oldfn = fn
955 oldfn = fn
956 fn = lambda s, c, **kwargs: oldfn(s, c)
956 fn = lambda s, c, **kwargs: oldfn(s, c)
957 l.append((mf, fn, params))
957 l.append((mf, fn, params))
958 self.filterpats[filter] = l
958 self.filterpats[filter] = l
959 return self.filterpats[filter]
959 return self.filterpats[filter]
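
    # Illustrative sketch (not from the original source): ``_loadfilter``
    # consumes hgrc sections such as the hypothetical one below, compiling
    # each pattern into a matcher paired with a filter function:
    #
    #     [encode]
    #     **.txt = tr -d '\r'
    #
    # With that config, _loadfilter('encode') would yield one
    # (matcher, fn, params) tuple, where fn falls back to piping the data
    # through the shell command via util.filter.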

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)
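
    # Illustrative sketch (not from the original source): the ``flags``
    # argument mirrors manifest flags, so a hypothetical caller writes a
    # regular file, an executable, or a symlink like this:
    #
    #     repo.wwrite('plain.txt', data, '')     # ordinary file
    #     repo.wwrite('run.sh', data, 'x')       # sets the executable bit
    #     repo.wwrite('alias', 'target', 'l')    # creates a symlink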

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write(None)

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic references between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when closing the transaction if
                # tr.addfilegenerator (via dirstate.write or so) isn't
                # invoked while the transaction is running
                repo.dirstate.write(None)
            else:
                # prevent in-memory changes from being written out at
                # the end of outer wlock scope or so
                repo.dirstate.invalidate()

                # discard all changes (including ones already written
                # out) in this transaction
                repo.vfs.rename('journal.dirstate', 'dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
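
    # Illustrative sketch (not from the original source) of the pattern used
    # above for issue5043: a closure must not capture the transaction object
    # itself, or each 'txnclose' hook scheduled under a long-lived lock keeps
    # a full transaction alive. Capturing only what the hook needs avoids
    # that:
    #
    #     hookargs = tr2.hookargs          # small dict, safe to retain
    #     def hook():
    #         reporef().hook('txnclose', **hookargs)   # no tr2 in scope
    #
    # whereas writing ``**tr2.hookargs`` inside hook() would pin tr2 until
    # the outermost lock is released.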

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
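
    # Illustrative note (not from the original source): ``undoname`` rewrites
    # a journal path to its post-transaction counterpart, so the files above
    # become 'undo', 'undo.dirstate', 'undo.branch', and so on once the
    # transaction completes and aftertrans() performs the renames.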

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
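
    # Illustrative sketch (not from the original source): _afterlock defers
    # work until the repository is fully unlocked, or runs it immediately if
    # nothing is locked. A hypothetical caller:
    #
    #     with repo.lock():
    #         repo._afterlock(lambda: repo.ui.status('lock released\n'))
    #         # ... more work; the callback fires only once the outermost
    #         # lock is released.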

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
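
    # Illustrative sketch (not from the original source): the canonical
    # ordering when both locks and a transaction are needed, matching the
    # docstrings above (wlock, then lock, then transaction) and mirroring
    # the pattern commit() uses below:
    #
    #     wlock = lock = tr = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         tr = repo.transaction('example-op')
    #         # ... mutate the store via tr ...
    #         tr.close()
    #     finally:
    #         lockmod.release(tr, lock, wlock)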

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
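
    # Illustrative sketch (not from the original source): for a file committed
    # after ``hg copy foo bar``, the filelog entry for 'bar' carries its
    # provenance in metadata rather than in its first parent:
    #
    #     meta = {'copy': 'foo', 'copyrev': hex(crev)}
    #     flog.add(text, meta, tr, linkrev, nullid, newfparent)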

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may have been stripped before the hook
            # fires
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
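
    # Illustrative sketch (not from the original source): a minimal caller,
    # assuming 'repo' is an existing localrepository with pending working
    # directory changes:
    #
    #     node = repo.commit(text='example message', user='alice <a@b.c>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')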
1668
1674
1669 @unfilteredmethod
1675 @unfilteredmethod
1670 def commitctx(self, ctx, error=False):
1676 def commitctx(self, ctx, error=False):
1671 """Add a new revision to current repository.
1677 """Add a new revision to current repository.
1672 Revision information is passed via the context argument.
1678 Revision information is passed via the context argument.
1673 """
1679 """
1674
1680
1675 tr = None
1681 tr = None
1676 p1, p2 = ctx.p1(), ctx.p2()
1682 p1, p2 = ctx.p1(), ctx.p2()
1677 user = ctx.user()
1683 user = ctx.user()
1678
1684
1679 lock = self.lock()
1685 lock = self.lock()
1680 try:
1686 try:
1681 tr = self.transaction("commit")
1687 tr = self.transaction("commit")
1682 trp = weakref.proxy(tr)
1688 trp = weakref.proxy(tr)
1683
1689
1684 if ctx.files():
1690 if ctx.files():
1685 m1 = p1.manifest()
1691 m1 = p1.manifest()
1686 m2 = p2.manifest()
1692 m2 = p2.manifest()
1687 m = m1.copy()
1693 m = m1.copy()
1688
1694
1689 # check in files
1695 # check in files
1690 added = []
1696 added = []
1691 changed = []
1697 changed = []
1692 removed = list(ctx.removed())
1698 removed = list(ctx.removed())
1693 linkrev = len(self)
1699 linkrev = len(self)
1694 self.ui.note(_("committing files:\n"))
1700 self.ui.note(_("committing files:\n"))
1695 for f in sorted(ctx.modified() + ctx.added()):
1701 for f in sorted(ctx.modified() + ctx.added()):
1696 self.ui.note(f + "\n")
1702 self.ui.note(f + "\n")
1697 try:
1703 try:
1698 fctx = ctx[f]
1704 fctx = ctx[f]
1699 if fctx is None:
1705 if fctx is None:
1700 removed.append(f)
1706 removed.append(f)
1701 else:
1707 else:
1702 added.append(f)
1708 added.append(f)
1703 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1709 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1704 trp, changed)
1710 trp, changed)
1705 m.setflag(f, fctx.flags())
1711 m.setflag(f, fctx.flags())
1706 except OSError as inst:
1712 except OSError as inst:
1707 self.ui.warn(_("trouble committing %s!\n") % f)
1713 self.ui.warn(_("trouble committing %s!\n") % f)
1708 raise
1714 raise
1709 except IOError as inst:
1715 except IOError as inst:
1710 errcode = getattr(inst, 'errno', errno.ENOENT)
1716 errcode = getattr(inst, 'errno', errno.ENOENT)
1711 if error or errcode and errcode != errno.ENOENT:
1717 if error or errcode and errcode != errno.ENOENT:
1712 self.ui.warn(_("trouble committing %s!\n") % f)
1718 self.ui.warn(_("trouble committing %s!\n") % f)
1713 raise
1719 raise
1714
1720
1715 # update manifest
1721 # update manifest
1716 self.ui.note(_("committing manifest\n"))
1722 self.ui.note(_("committing manifest\n"))
1717 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1723 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1718 drop = [f for f in removed if f in m]
1724 drop = [f for f in removed if f in m]
1719 for f in drop:
1725 for f in drop:
1720 del m[f]
1726 del m[f]
1721 mn = self.manifest.add(m, trp, linkrev,
1727 mn = self.manifest.add(m, trp, linkrev,
1722 p1.manifestnode(), p2.manifestnode(),
1728 p1.manifestnode(), p2.manifestnode(),
1723 added, drop)
1729 added, drop)
1724 files = changed + removed
1730 files = changed + removed
1725 else:
1731 else:
1726 mn = p1.manifestnode()
1732 mn = p1.manifestnode()
1727 files = []
1733 files = []
1728
1734
1729 # update changelog
1735 # update changelog
1730 self.ui.note(_("committing changelog\n"))
1736 self.ui.note(_("committing changelog\n"))
1731 self.changelog.delayupdate(tr)
1737 self.changelog.delayupdate(tr)
1732 n = self.changelog.add(mn, files, ctx.description(),
1738 n = self.changelog.add(mn, files, ctx.description(),
1733 trp, p1.node(), p2.node(),
1739 trp, p1.node(), p2.node(),
1734 user, ctx.date(), ctx.extra().copy())
1740 user, ctx.date(), ctx.extra().copy())
1735 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1741 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1736 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1742 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1737 parent2=xp2)
1743 parent2=xp2)
1738 # set the new commit is proper phase
1744 # set the new commit is proper phase
1739 targetphase = subrepo.newcommitphase(self.ui, ctx)
1745 targetphase = subrepo.newcommitphase(self.ui, ctx)
1740 if targetphase:
1746 if targetphase:
1741 # retract boundary do not alter parent changeset.
1747 # retract boundary do not alter parent changeset.
1742 # if a parent have higher the resulting phase will
1748 # if a parent have higher the resulting phase will
1743 # be compliant anyway
1749 # be compliant anyway
1744 #
1750 #
1745 # if minimal phase was 0 we don't need to retract anything
1751 # if minimal phase was 0 we don't need to retract anything
1746 phases.retractboundary(self, tr, targetphase, [n])
1752 phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

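    # Editor's illustration (not part of this module): driving commitctx()
    # with an in-memory context. 'repo', the path and the file contents are
    # placeholders; memctx/memfilectx come from the 'context' module
    # imported above.
    #
    #   def filectxfn(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'new contents\n')
    #   mctx = context.memctx(repo, (repo['.'].node(), nullid),
    #                         'example commit', ['a.txt'], filectxfn,
    #                         user='editor <editor@example.com>')
    #   node = repo.commitctx(mctx)  # fires pretxncommit, then tr.close()
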
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and
        waiting to be flushed when the current lock is released. Because
        a call to destroyed is imminent, the repo will be invalidated,
        causing those changes to either stay in memory (waiting for the
        next unlock) or vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server
        # processes. Thanks to branchcache collaboration this is done from
        # the nearest filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

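    # Hedged sketch of the intended call order around history destruction
    # (roughly how strip drives the two methods above; details elided):
    #
    #   lock = repo.lock()
    #   try:
    #       repo.destroying()   # flush pending in-memory state first
    #       ...                 # truncate revlogs / remove changesets
    #       repo.destroyed()    # repair caches and invalidate the repo
    #   finally:
    #       lock.release()
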
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

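    # Illustrative use of the two convenience wrappers above (editor's
    # sketch; 'repo' is an existing localrepository):
    #
    #   from mercurial import match as matchmod
    #   st = repo.status()              # working directory vs. '.'
    #   for f in st.modified:
    #       repo.ui.write('%s\n' % f)
    #   files = list(repo.walk(matchmod.always(repo.root, '')))
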
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

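    # Example (editor's illustration): newest head of the 'default'
    # branch, skipping closed heads.
    #
    #   heads = repo.branchheads('default')
    #   tiphead = heads and heads[0] or None
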
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

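    # Reading the loop above (editor's note): each requested node is
    # walked back along first parents until a merge (p[1] != nullid) or
    # a root (p[0] == nullid) is reached, so each tuple is
    # (start, branchpoint, p1, p2) describing one linear run of history.
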
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

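    # How the sampling above behaves (editor's note): nodes are recorded
    # whenever i == f, i.e. at exponentially growing distances 1, 2, 4,
    # 8, ... from 'top', stopping at 'bottom'. On a long linear history:
    #
    #   repo.between([(top, bottom)])
    #   # -> [[top~1, top~2, top~4, top~8, ...]]
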
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote,
        outgoing)" functions, which are called before pushing changesets.
        """
        return util.hooks()

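    # Sketch of how an extension might register one of these functions
    # (the extension name and the check itself are hypothetical):
    #
    #   def checkoutgoing(repo, remote, outgoing):
    #       if forbidden(outgoing.missing):
    #           raise error.Abort('refusing to push these changesets')
    #   repo.prepushoutgoinghooks.add('myextension', checkoutgoing)
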
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

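    # Illustrative round trip through the two pushkey entry points above
    # (the bookmark name and 'newnode' are the editor's placeholders):
    #
    #   marks = repo.listkeys('bookmarks')        # {name: hex node}
    #   ok = repo.pushkey('bookmarks', 'stable',
    #                     marks.get('stable', ''), hex(newnode))
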
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

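# Hedged usage sketch: localrepo passes aftertrans(renames) as the
# transaction's post-close callback, so journal files are renamed to
# undo files once the transaction commits. Argument wiring is elided
# here; see localrepository.transaction() for the real call.
#
#   tr = transaction.transaction(report, self.svfs, vfsmap, "journal",
#                                "undo", aftertrans(renames))
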
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

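# For example:
#
#   undoname('.hg/journal.branch')  # -> '.hg/undo.branch'
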
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True