localrepo: move extension loading to a separate method...
Jun Wu - r30989:74af89c6 default
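
The change itself is small: localrepository.__init__ previously called
extensions.loadall(self.ui) inline after reading the repository hgrc, and it
now routes through a new _loadextensions() method, so subclasses and wrappers
have a single named hook for per-repository extension loading. A minimal
sketch of the kind of override this enables (the timedrepo class and its
timing logic are illustrative assumptions, not part of this commit):

    import time

    from mercurial import localrepo

    class timedrepo(localrepo.localrepository):
        def _loadextensions(self):
            # Time per-repository extension loading, then delegate to
            # the default implementation added by this commit.
            start = time.time()
            super(timedrepo, self)._loadextensions()
            self.ui.debug('extensions loaded in %.3fs\n'
                          % (time.time() - start))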
@@ -1,2031 +1,2034 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import hashlib
 import inspect
 import os
 import random
 import time
 import weakref

 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
     wdirrev,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     changelog,
     context,
     dirstate,
     dirstateguard,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     manifest,
     match as matchmod,
     merge as mergemod,
     mergeutil,
     namespaces,
     obsolete,
     pathutil,
     peer,
     phases,
     pushkey,
     repoview,
     revset,
     scmutil,
     store,
     subrepo,
     tags as tagsmod,
     transaction,
     util,
 )

 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq

 class repofilecache(scmutil.filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """

     def __get__(self, repo, type=None):
         if repo is None:
             return self
         return super(repofilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(repofilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(repofilecache, self).__delete__(repo.unfiltered())

 class storecache(repofilecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)

 class unfilteredpropertycache(util.propertycache):
     """propertycache that apply to unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)

 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering in account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())

 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper

 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                   'unbundle'))
 legacycaps = moderncaps.union(set(['changegroupsubset']))

 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=moderncaps):
         peer.peerrepository.__init__(self)
         self._repo = repo.filtered('served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
         self.requirements = repo.requirements
         self.supportedformats = repo.supportedformats

     def close(self):
         self._repo.close()

     def _capabilities(self):
         return self._caps

     def local(self):
         return self._repo

     def canpush(self):
         return True

     def url(self):
         return self._repo.url()

     def lookup(self, key):
         return self._repo.lookup(key)

     def branchmap(self):
         return self._repo.branchmap()

     def heads(self):
         return self._repo.heads()

     def known(self, nodes):
         return self._repo.known(nodes)

     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                   **kwargs):
         chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                           common=common, bundlecaps=bundlecaps,
                                           **kwargs)
         cb = util.chunkbuffer(chunks)

         if bundlecaps is not None and 'HG20' in bundlecaps:
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler('01', cb, None)

     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.

     def unbundle(self, cg, heads, url):
         """apply a bundle on a repo

         This function handles the repo locking itself."""
         try:
             try:
                 cg = exchange.readbundle(self.ui, cg, None)
                 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                 if util.safehasattr(ret, 'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(_('push failed:'), str(exc))

     def lock(self):
         return self._repo.lock()

     def addchangegroup(self, cg, source, url):
         return cg.apply(self._repo, source, url)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         '''used to test argument passing over the wire'''
         return "%s %s %s %s %s" % (one, two, three, four, five)

 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''

     def __init__(self, repo):
         localpeer.__init__(self, repo, caps=legacycaps)

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def between(self, pairs):
         return self._repo.between(pairs)

     def changegroup(self, basenodes, source):
         return changegroup.changegroup(self._repo, basenodes, source)

     def changegroupsubset(self, bases, heads, source):
         return changegroup.changegroupsubset(self._repo, bases, heads, source)

 class localrepository(object):

     supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                             'manifestv2'))
     _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                              'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
     filtername = None

     # a list of (ui, featureset) functions.
     # only functions defined in module of enabled extensions are invoked
     featuresetupfuncs = set()

     def __init__(self, baseui, path, create=False):
         self.requirements = set()
         self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
         self.wopener = self.wvfs
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = pathutil.pathauditor(self.root, self._checknested)
         self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                 realfs=False)
         self.vfs = scmutil.vfs(self.path)
         self.opener = self.vfs
         self.baseui = baseui
         self.ui = baseui.copy()
         self.ui.copy = baseui.copy # prevent copying repo configuration
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
-            extensions.loadall(self.ui)
+            self._loadextensions()
         except IOError:
             pass

         if self.featuresetupfuncs:
             self.supported = set(self._basesupported) # use private copy
             extmods = set(m.__name__ for n, m
                           in extensions.extensions(self.ui))
             for setupfunc in self.featuresetupfuncs:
                 if setupfunc.__module__ in extmods:
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported

         # Add compression engines.
         for name in util.compengines:
             engine = util.compengines[name]
             if engine.revlogheader():
                 self.supported.add('exp-compression-%s' % name)

         if not self.vfs.isdir():
             if create:
                 self.requirements = newreporequirements(self)

                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)

                 if 'store' in self.requirements:
                     self.vfs.mkdir("store")

                     # create an invalid changelog
                     self.vfs.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 self.requirements = scmutil.readrequires(
                     self.vfs, self.supported)
             except IOError as inst:
                 if inst.errno != errno.ENOENT:
                     raise

         self.sharedpath = self.path
         try:
             vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                               realpath=True)
             s = vfs.base
             if not vfs.exists():
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise

         self.store = store.store(
             self.requirements, self.sharedpath, scmutil.vfs)
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self._applyopenerreqs()
         if create:
             self._writerequirements()

         self._dirstatevalidatewarned = False

         self._branchcaches = {}
         self._revbranchcache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}

         # hold sets of revision to be filtered
         # should be cleared when something might have changed the filter value:
         # - new changesets,
         # - phase change,
         # - new obsolescence marker,
         # - working directory parent change,
         # - bookmark changes
         self.filteredrevcache = {}

         # generic mapping between names and nodes
         self.names = namespaces.namespaces()

     def close(self):
         self._writecaches()

+    def _loadextensions(self):
+        extensions.loadall(self.ui)
+
374 def _writecaches(self):
377 def _writecaches(self):
375 if self._revbranchcache:
378 if self._revbranchcache:
376 self._revbranchcache.write()
379 self._revbranchcache.write()
377
380
378 def _restrictcapabilities(self, caps):
381 def _restrictcapabilities(self, caps):
379 if self.ui.configbool('experimental', 'bundle2-advertise', True):
382 if self.ui.configbool('experimental', 'bundle2-advertise', True):
380 caps = set(caps)
383 caps = set(caps)
381 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
384 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
382 caps.add('bundle2=' + urlreq.quote(capsblob))
385 caps.add('bundle2=' + urlreq.quote(capsblob))
383 return caps
386 return caps
384
387
385 def _applyopenerreqs(self):
388 def _applyopenerreqs(self):
386 self.svfs.options = dict((r, 1) for r in self.requirements
389 self.svfs.options = dict((r, 1) for r in self.requirements
387 if r in self.openerreqs)
390 if r in self.openerreqs)
388 # experimental config: format.chunkcachesize
391 # experimental config: format.chunkcachesize
389 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
392 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
390 if chunkcachesize is not None:
393 if chunkcachesize is not None:
391 self.svfs.options['chunkcachesize'] = chunkcachesize
394 self.svfs.options['chunkcachesize'] = chunkcachesize
392 # experimental config: format.maxchainlen
395 # experimental config: format.maxchainlen
393 maxchainlen = self.ui.configint('format', 'maxchainlen')
396 maxchainlen = self.ui.configint('format', 'maxchainlen')
394 if maxchainlen is not None:
397 if maxchainlen is not None:
395 self.svfs.options['maxchainlen'] = maxchainlen
398 self.svfs.options['maxchainlen'] = maxchainlen
396 # experimental config: format.manifestcachesize
399 # experimental config: format.manifestcachesize
397 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
400 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
398 if manifestcachesize is not None:
401 if manifestcachesize is not None:
399 self.svfs.options['manifestcachesize'] = manifestcachesize
402 self.svfs.options['manifestcachesize'] = manifestcachesize
400 # experimental config: format.aggressivemergedeltas
403 # experimental config: format.aggressivemergedeltas
401 aggressivemergedeltas = self.ui.configbool('format',
404 aggressivemergedeltas = self.ui.configbool('format',
402 'aggressivemergedeltas', False)
405 'aggressivemergedeltas', False)
403 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
406 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
404 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
407 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
405
408
406 for r in self.requirements:
409 for r in self.requirements:
407 if r.startswith('exp-compression-'):
410 if r.startswith('exp-compression-'):
408 self.svfs.options['compengine'] = r[len('exp-compression-'):]
411 self.svfs.options['compengine'] = r[len('exp-compression-'):]
409
412
410 def _writerequirements(self):
413 def _writerequirements(self):
411 scmutil.writerequires(self.vfs, self.requirements)
414 scmutil.writerequires(self.vfs, self.requirements)
412
415
413 def _checknested(self, path):
416 def _checknested(self, path):
414 """Determine if path is a legal nested repository."""
417 """Determine if path is a legal nested repository."""
415 if not path.startswith(self.root):
418 if not path.startswith(self.root):
416 return False
419 return False
417 subpath = path[len(self.root) + 1:]
420 subpath = path[len(self.root) + 1:]
418 normsubpath = util.pconvert(subpath)
421 normsubpath = util.pconvert(subpath)
419
422
420 # XXX: Checking against the current working copy is wrong in
423 # XXX: Checking against the current working copy is wrong in
421 # the sense that it can reject things like
424 # the sense that it can reject things like
422 #
425 #
423 # $ hg cat -r 10 sub/x.txt
426 # $ hg cat -r 10 sub/x.txt
424 #
427 #
425 # if sub/ is no longer a subrepository in the working copy
428 # if sub/ is no longer a subrepository in the working copy
426 # parent revision.
429 # parent revision.
427 #
430 #
428 # However, it can of course also allow things that would have
431 # However, it can of course also allow things that would have
429 # been rejected before, such as the above cat command if sub/
432 # been rejected before, such as the above cat command if sub/
430 # is a subrepository now, but was a normal directory before.
433 # is a subrepository now, but was a normal directory before.
431 # The old path auditor would have rejected by mistake since it
434 # The old path auditor would have rejected by mistake since it
432 # panics when it sees sub/.hg/.
435 # panics when it sees sub/.hg/.
433 #
436 #
434 # All in all, checking against the working copy seems sensible
437 # All in all, checking against the working copy seems sensible
435 # since we want to prevent access to nested repositories on
438 # since we want to prevent access to nested repositories on
436 # the filesystem *now*.
439 # the filesystem *now*.
437 ctx = self[None]
440 ctx = self[None]
438 parts = util.splitpath(subpath)
441 parts = util.splitpath(subpath)
439 while parts:
442 while parts:
440 prefix = '/'.join(parts)
443 prefix = '/'.join(parts)
441 if prefix in ctx.substate:
444 if prefix in ctx.substate:
442 if prefix == normsubpath:
445 if prefix == normsubpath:
443 return True
446 return True
444 else:
447 else:
445 sub = ctx.sub(prefix)
448 sub = ctx.sub(prefix)
446 return sub.checknested(subpath[len(prefix) + 1:])
449 return sub.checknested(subpath[len(prefix) + 1:])
447 else:
450 else:
448 parts.pop()
451 parts.pop()
449 return False
452 return False
450
453
451 def peer(self):
454 def peer(self):
452 return localpeer(self) # not cached to avoid reference cycle
455 return localpeer(self) # not cached to avoid reference cycle
453
456
454 def unfiltered(self):
457 def unfiltered(self):
455 """Return unfiltered version of the repository
458 """Return unfiltered version of the repository
456
459
457 Intended to be overwritten by filtered repo."""
460 Intended to be overwritten by filtered repo."""
458 return self
461 return self
459
462
460 def filtered(self, name):
463 def filtered(self, name):
461 """Return a filtered version of a repository"""
464 """Return a filtered version of a repository"""
462 # build a new class with the mixin and the current class
465 # build a new class with the mixin and the current class
463 # (possibly subclass of the repo)
466 # (possibly subclass of the repo)
464 class proxycls(repoview.repoview, self.unfiltered().__class__):
467 class proxycls(repoview.repoview, self.unfiltered().__class__):
465 pass
468 pass
466 return proxycls(self, name)
469 return proxycls(self, name)
467
470
468 @repofilecache('bookmarks', 'bookmarks.current')
471 @repofilecache('bookmarks', 'bookmarks.current')
469 def _bookmarks(self):
472 def _bookmarks(self):
470 return bookmarks.bmstore(self)
473 return bookmarks.bmstore(self)
471
474
472 @property
475 @property
473 def _activebookmark(self):
476 def _activebookmark(self):
474 return self._bookmarks.active
477 return self._bookmarks.active
475
478
476 def bookmarkheads(self, bookmark):
479 def bookmarkheads(self, bookmark):
477 name = bookmark.split('@', 1)[0]
480 name = bookmark.split('@', 1)[0]
478 heads = []
481 heads = []
479 for mark, n in self._bookmarks.iteritems():
482 for mark, n in self._bookmarks.iteritems():
480 if mark.split('@', 1)[0] == name:
483 if mark.split('@', 1)[0] == name:
481 heads.append(n)
484 heads.append(n)
482 return heads
485 return heads
483
486
484 # _phaserevs and _phasesets depend on changelog. what we need is to
487 # _phaserevs and _phasesets depend on changelog. what we need is to
485 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
488 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
486 # can't be easily expressed in filecache mechanism.
489 # can't be easily expressed in filecache mechanism.
487 @storecache('phaseroots', '00changelog.i')
490 @storecache('phaseroots', '00changelog.i')
488 def _phasecache(self):
491 def _phasecache(self):
489 return phases.phasecache(self, self._phasedefaults)
492 return phases.phasecache(self, self._phasedefaults)
490
493
491 @storecache('obsstore')
494 @storecache('obsstore')
492 def obsstore(self):
495 def obsstore(self):
493 # read default format for new obsstore.
496 # read default format for new obsstore.
494 # developer config: format.obsstore-version
497 # developer config: format.obsstore-version
495 defaultformat = self.ui.configint('format', 'obsstore-version', None)
498 defaultformat = self.ui.configint('format', 'obsstore-version', None)
496 # rely on obsstore class default when possible.
499 # rely on obsstore class default when possible.
497 kwargs = {}
500 kwargs = {}
498 if defaultformat is not None:
501 if defaultformat is not None:
499 kwargs['defaultformat'] = defaultformat
502 kwargs['defaultformat'] = defaultformat
500 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
503 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
501 store = obsolete.obsstore(self.svfs, readonly=readonly,
504 store = obsolete.obsstore(self.svfs, readonly=readonly,
502 **kwargs)
505 **kwargs)
503 if store and readonly:
506 if store and readonly:
504 self.ui.warn(
507 self.ui.warn(
505 _('obsolete feature not enabled but %i markers found!\n')
508 _('obsolete feature not enabled but %i markers found!\n')
506 % len(list(store)))
509 % len(list(store)))
507 return store
510 return store
508
511
509 @storecache('00changelog.i')
512 @storecache('00changelog.i')
510 def changelog(self):
513 def changelog(self):
511 c = changelog.changelog(self.svfs)
514 c = changelog.changelog(self.svfs)
512 if 'HG_PENDING' in encoding.environ:
515 if 'HG_PENDING' in encoding.environ:
513 p = encoding.environ['HG_PENDING']
516 p = encoding.environ['HG_PENDING']
514 if p.startswith(self.root):
517 if p.startswith(self.root):
515 c.readpending('00changelog.i.a')
518 c.readpending('00changelog.i.a')
516 return c
519 return c
517
520
518 def _constructmanifest(self):
521 def _constructmanifest(self):
519 # This is a temporary function while we migrate from manifest to
522 # This is a temporary function while we migrate from manifest to
520 # manifestlog. It allows bundlerepo and unionrepo to intercept the
523 # manifestlog. It allows bundlerepo and unionrepo to intercept the
521 # manifest creation.
524 # manifest creation.
522 return manifest.manifestrevlog(self.svfs)
525 return manifest.manifestrevlog(self.svfs)
523
526
524 @storecache('00manifest.i')
527 @storecache('00manifest.i')
525 def manifestlog(self):
528 def manifestlog(self):
526 return manifest.manifestlog(self.svfs, self)
529 return manifest.manifestlog(self.svfs, self)
527
530
528 @repofilecache('dirstate')
531 @repofilecache('dirstate')
529 def dirstate(self):
532 def dirstate(self):
530 return dirstate.dirstate(self.vfs, self.ui, self.root,
533 return dirstate.dirstate(self.vfs, self.ui, self.root,
531 self._dirstatevalidate)
534 self._dirstatevalidate)
532
535
533 def _dirstatevalidate(self, node):
536 def _dirstatevalidate(self, node):
534 try:
537 try:
535 self.changelog.rev(node)
538 self.changelog.rev(node)
536 return node
539 return node
537 except error.LookupError:
540 except error.LookupError:
538 if not self._dirstatevalidatewarned:
541 if not self._dirstatevalidatewarned:
539 self._dirstatevalidatewarned = True
542 self._dirstatevalidatewarned = True
540 self.ui.warn(_("warning: ignoring unknown"
543 self.ui.warn(_("warning: ignoring unknown"
541 " working parent %s!\n") % short(node))
544 " working parent %s!\n") % short(node))
542 return nullid
545 return nullid
543
546
544 def __getitem__(self, changeid):
547 def __getitem__(self, changeid):
545 if changeid is None or changeid == wdirrev:
548 if changeid is None or changeid == wdirrev:
546 return context.workingctx(self)
549 return context.workingctx(self)
547 if isinstance(changeid, slice):
550 if isinstance(changeid, slice):
548 return [context.changectx(self, i)
551 return [context.changectx(self, i)
549 for i in xrange(*changeid.indices(len(self)))
552 for i in xrange(*changeid.indices(len(self)))
550 if i not in self.changelog.filteredrevs]
553 if i not in self.changelog.filteredrevs]
551 return context.changectx(self, changeid)
554 return context.changectx(self, changeid)
552
555
553 def __contains__(self, changeid):
556 def __contains__(self, changeid):
554 try:
557 try:
555 self[changeid]
558 self[changeid]
556 return True
559 return True
557 except error.RepoLookupError:
560 except error.RepoLookupError:
558 return False
561 return False
559
562
560 def __nonzero__(self):
563 def __nonzero__(self):
561 return True
564 return True
562
565
563 def __len__(self):
566 def __len__(self):
564 return len(self.changelog)
567 return len(self.changelog)
565
568
566 def __iter__(self):
569 def __iter__(self):
567 return iter(self.changelog)
570 return iter(self.changelog)
568
571
569 def revs(self, expr, *args):
572 def revs(self, expr, *args):
570 '''Find revisions matching a revset.
573 '''Find revisions matching a revset.
571
574
572 The revset is specified as a string ``expr`` that may contain
575 The revset is specified as a string ``expr`` that may contain
573 %-formatting to escape certain types. See ``revset.formatspec``.
576 %-formatting to escape certain types. See ``revset.formatspec``.
574
577
575 Revset aliases from the configuration are not expanded. To expand
578 Revset aliases from the configuration are not expanded. To expand
576 user aliases, consider calling ``scmutil.revrange()``.
579 user aliases, consider calling ``scmutil.revrange()``.
577
580
578 Returns a revset.abstractsmartset, which is a list-like interface
581 Returns a revset.abstractsmartset, which is a list-like interface
579 that contains integer revisions.
582 that contains integer revisions.
580 '''
583 '''
581 expr = revset.formatspec(expr, *args)
584 expr = revset.formatspec(expr, *args)
582 m = revset.match(None, expr)
585 m = revset.match(None, expr)
583 return m(self)
586 return m(self)
584
587
585 def set(self, expr, *args):
588 def set(self, expr, *args):
586 '''Find revisions matching a revset and emit changectx instances.
589 '''Find revisions matching a revset and emit changectx instances.
587
590
588 This is a convenience wrapper around ``revs()`` that iterates the
591 This is a convenience wrapper around ``revs()`` that iterates the
589 result and is a generator of changectx instances.
592 result and is a generator of changectx instances.
590
593
591 Revset aliases from the configuration are not expanded. To expand
594 Revset aliases from the configuration are not expanded. To expand
592 user aliases, consider calling ``scmutil.revrange()``.
595 user aliases, consider calling ``scmutil.revrange()``.
593 '''
596 '''
594 for r in self.revs(expr, *args):
597 for r in self.revs(expr, *args):
595 yield self[r]
598 yield self[r]
596
599
597 def url(self):
600 def url(self):
598 return 'file:' + self.root
601 return 'file:' + self.root
599
602
600 def hook(self, name, throw=False, **args):
603 def hook(self, name, throw=False, **args):
601 """Call a hook, passing this repo instance.
604 """Call a hook, passing this repo instance.
602
605
603 This a convenience method to aid invoking hooks. Extensions likely
606 This a convenience method to aid invoking hooks. Extensions likely
604 won't call this unless they have registered a custom hook or are
607 won't call this unless they have registered a custom hook or are
605 replacing code that is expected to call a hook.
608 replacing code that is expected to call a hook.
606 """
609 """
607 return hook.hook(self.ui, self, name, throw, **args)
610 return hook.hook(self.ui, self, name, throw, **args)
608
611
609 @unfilteredmethod
612 @unfilteredmethod
610 def _tag(self, names, node, message, local, user, date, extra=None,
613 def _tag(self, names, node, message, local, user, date, extra=None,
611 editor=False):
614 editor=False):
612 if isinstance(names, str):
615 if isinstance(names, str):
613 names = (names,)
616 names = (names,)
614
617
615 branches = self.branchmap()
618 branches = self.branchmap()
616 for name in names:
619 for name in names:
617 self.hook('pretag', throw=True, node=hex(node), tag=name,
620 self.hook('pretag', throw=True, node=hex(node), tag=name,
618 local=local)
621 local=local)
619 if name in branches:
622 if name in branches:
620 self.ui.warn(_("warning: tag %s conflicts with existing"
623 self.ui.warn(_("warning: tag %s conflicts with existing"
621 " branch name\n") % name)
624 " branch name\n") % name)
622
625
623 def writetags(fp, names, munge, prevtags):
626 def writetags(fp, names, munge, prevtags):
624 fp.seek(0, 2)
627 fp.seek(0, 2)
625 if prevtags and prevtags[-1] != '\n':
628 if prevtags and prevtags[-1] != '\n':
626 fp.write('\n')
629 fp.write('\n')
627 for name in names:
630 for name in names:
628 if munge:
631 if munge:
629 m = munge(name)
632 m = munge(name)
630 else:
633 else:
631 m = name
634 m = name
632
635
633 if (self._tagscache.tagtypes and
636 if (self._tagscache.tagtypes and
634 name in self._tagscache.tagtypes):
637 name in self._tagscache.tagtypes):
635 old = self.tags().get(name, nullid)
638 old = self.tags().get(name, nullid)
636 fp.write('%s %s\n' % (hex(old), m))
639 fp.write('%s %s\n' % (hex(old), m))
637 fp.write('%s %s\n' % (hex(node), m))
640 fp.write('%s %s\n' % (hex(node), m))
638 fp.close()
641 fp.close()
639
642
640 prevtags = ''
643 prevtags = ''
641 if local:
644 if local:
642 try:
645 try:
643 fp = self.vfs('localtags', 'r+')
646 fp = self.vfs('localtags', 'r+')
644 except IOError:
647 except IOError:
645 fp = self.vfs('localtags', 'a')
648 fp = self.vfs('localtags', 'a')
646 else:
649 else:
647 prevtags = fp.read()
650 prevtags = fp.read()
648
651
649 # local tags are stored in the current charset
652 # local tags are stored in the current charset
650 writetags(fp, names, None, prevtags)
653 writetags(fp, names, None, prevtags)
651 for name in names:
654 for name in names:
652 self.hook('tag', node=hex(node), tag=name, local=local)
655 self.hook('tag', node=hex(node), tag=name, local=local)
653 return
656 return
654
657
655 try:
658 try:
656 fp = self.wfile('.hgtags', 'rb+')
659 fp = self.wfile('.hgtags', 'rb+')
657 except IOError as e:
660 except IOError as e:
658 if e.errno != errno.ENOENT:
661 if e.errno != errno.ENOENT:
659 raise
662 raise
660 fp = self.wfile('.hgtags', 'ab')
663 fp = self.wfile('.hgtags', 'ab')
661 else:
664 else:
662 prevtags = fp.read()
665 prevtags = fp.read()
663
666
664 # committed tags are stored in UTF-8
667 # committed tags are stored in UTF-8
665 writetags(fp, names, encoding.fromlocal, prevtags)
668 writetags(fp, names, encoding.fromlocal, prevtags)
666
669
667 fp.close()
670 fp.close()
668
671
669 self.invalidatecaches()
672 self.invalidatecaches()
670
673
671 if '.hgtags' not in self.dirstate:
674 if '.hgtags' not in self.dirstate:
672 self[None].add(['.hgtags'])
675 self[None].add(['.hgtags'])
673
676
674 m = matchmod.exact(self.root, '', ['.hgtags'])
677 m = matchmod.exact(self.root, '', ['.hgtags'])
675 tagnode = self.commit(message, user, date, extra=extra, match=m,
678 tagnode = self.commit(message, user, date, extra=extra, match=m,
676 editor=editor)
679 editor=editor)
677
680
678 for name in names:
681 for name in names:
679 self.hook('tag', node=hex(node), tag=name, local=local)
682 self.hook('tag', node=hex(node), tag=name, local=local)
680
683
681 return tagnode
684 return tagnode
682
685
683 def tag(self, names, node, message, local, user, date, editor=False):
686 def tag(self, names, node, message, local, user, date, editor=False):
684 '''tag a revision with one or more symbolic names.
687 '''tag a revision with one or more symbolic names.
685
688
686 names is a list of strings or, when adding a single tag, names may be a
689 names is a list of strings or, when adding a single tag, names may be a
687 string.
690 string.
688
691
689 if local is True, the tags are stored in a per-repository file.
692 if local is True, the tags are stored in a per-repository file.
690 otherwise, they are stored in the .hgtags file, and a new
693 otherwise, they are stored in the .hgtags file, and a new
691 changeset is committed with the change.
694 changeset is committed with the change.
692
695
693 keyword arguments:
696 keyword arguments:
694
697
695 local: whether to store tags in non-version-controlled file
698 local: whether to store tags in non-version-controlled file
696 (default False)
699 (default False)
697
700
698 message: commit message to use if committing
701 message: commit message to use if committing
699
702
700 user: name of user to use if committing
703 user: name of user to use if committing
701
704
702 date: date tuple to use if committing'''
705 date: date tuple to use if committing'''
703
706
704 if not local:
707 if not local:
705 m = matchmod.exact(self.root, '', ['.hgtags'])
708 m = matchmod.exact(self.root, '', ['.hgtags'])
706 if any(self.status(match=m, unknown=True, ignored=True)):
709 if any(self.status(match=m, unknown=True, ignored=True)):
707 raise error.Abort(_('working copy of .hgtags is changed'),
710 raise error.Abort(_('working copy of .hgtags is changed'),
708 hint=_('please commit .hgtags manually'))
711 hint=_('please commit .hgtags manually'))
709
712
710 self.tags() # instantiate the cache
713 self.tags() # instantiate the cache
711 self._tag(names, node, message, local, user, date, editor=editor)
714 self._tag(names, node, message, local, user, date, editor=editor)
712
715
713 @filteredpropertycache
716 @filteredpropertycache
714 def _tagscache(self):
717 def _tagscache(self):
715 '''Returns a tagscache object that contains various tags related
718 '''Returns a tagscache object that contains various tags related
716 caches.'''
719 caches.'''
717
720
718 # This simplifies its cache management by having one decorated
721 # This simplifies its cache management by having one decorated
719 # function (this one) and the rest simply fetch things from it.
722 # function (this one) and the rest simply fetch things from it.
720 class tagscache(object):
723 class tagscache(object):
721 def __init__(self):
724 def __init__(self):
722 # These two define the set of tags for this repository. tags
725 # These two define the set of tags for this repository. tags
723 # maps tag name to node; tagtypes maps tag name to 'global' or
726 # maps tag name to node; tagtypes maps tag name to 'global' or
724 # 'local'. (Global tags are defined by .hgtags across all
727 # 'local'. (Global tags are defined by .hgtags across all
725 # heads, and local tags are defined in .hg/localtags.)
728 # heads, and local tags are defined in .hg/localtags.)
726 # They constitute the in-memory cache of tags.
729 # They constitute the in-memory cache of tags.
727 self.tags = self.tagtypes = None
730 self.tags = self.tagtypes = None
728
731
729 self.nodetagscache = self.tagslist = None
732 self.nodetagscache = self.tagslist = None
730
733
731 cache = tagscache()
734 cache = tagscache()
732 cache.tags, cache.tagtypes = self._findtags()
735 cache.tags, cache.tagtypes = self._findtags()
733
736
734 return cache
737 return cache
735
738
736 def tags(self):
739 def tags(self):
737 '''return a mapping of tag to node'''
740 '''return a mapping of tag to node'''
738 t = {}
741 t = {}
739 if self.changelog.filteredrevs:
742 if self.changelog.filteredrevs:
740 tags, tt = self._findtags()
743 tags, tt = self._findtags()
741 else:
744 else:
742 tags = self._tagscache.tags
745 tags = self._tagscache.tags
743 for k, v in tags.iteritems():
746 for k, v in tags.iteritems():
744 try:
747 try:
745 # ignore tags to unknown nodes
748 # ignore tags to unknown nodes
746 self.changelog.rev(v)
749 self.changelog.rev(v)
747 t[k] = v
750 t[k] = v
748 except (error.LookupError, ValueError):
751 except (error.LookupError, ValueError):
749 pass
752 pass
750 return t
753 return t
751
754
752 def _findtags(self):
755 def _findtags(self):
753 '''Do the hard work of finding tags. Return a pair of dicts
756 '''Do the hard work of finding tags. Return a pair of dicts
754 (tags, tagtypes) where tags maps tag name to node, and tagtypes
757 (tags, tagtypes) where tags maps tag name to node, and tagtypes
755 maps tag name to a string like \'global\' or \'local\'.
758 maps tag name to a string like \'global\' or \'local\'.
756 Subclasses or extensions are free to add their own tags, but
759 Subclasses or extensions are free to add their own tags, but
757 should be aware that the returned dicts will be retained for the
760 should be aware that the returned dicts will be retained for the
758 duration of the localrepo object.'''
761 duration of the localrepo object.'''
759
762
760 # XXX what tagtype should subclasses/extensions use? Currently
763 # XXX what tagtype should subclasses/extensions use? Currently
761 # mq and bookmarks add tags, but do not set the tagtype at all.
764 # mq and bookmarks add tags, but do not set the tagtype at all.
762 # Should each extension invent its own tag type? Should there
765 # Should each extension invent its own tag type? Should there
763 # be one tagtype for all such "virtual" tags? Or is the status
766 # be one tagtype for all such "virtual" tags? Or is the status
764 # quo fine?
767 # quo fine?
765
768
766 alltags = {} # map tag name to (node, hist)
769 alltags = {} # map tag name to (node, hist)
767 tagtypes = {}
770 tagtypes = {}
768
771
769 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
772 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
770 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
773 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
771
774
772 # Build the return dicts. Have to re-encode tag names because
775 # Build the return dicts. Have to re-encode tag names because
773 # the tags module always uses UTF-8 (in order not to lose info
776 # the tags module always uses UTF-8 (in order not to lose info
774 # writing to the cache), but the rest of Mercurial wants them in
777 # writing to the cache), but the rest of Mercurial wants them in
775 # local encoding.
778 # local encoding.
776 tags = {}
779 tags = {}
777 for (name, (node, hist)) in alltags.iteritems():
780 for (name, (node, hist)) in alltags.iteritems():
778 if node != nullid:
781 if node != nullid:
779 tags[encoding.tolocal(name)] = node
782 tags[encoding.tolocal(name)] = node
780 tags['tip'] = self.changelog.tip()
783 tags['tip'] = self.changelog.tip()
781 tagtypes = dict([(encoding.tolocal(name), value)
784 tagtypes = dict([(encoding.tolocal(name), value)
782 for (name, value) in tagtypes.iteritems()])
785 for (name, value) in tagtypes.iteritems()])
783 return (tags, tagtypes)
786 return (tags, tagtypes)
784
787
785 def tagtype(self, tagname):
788 def tagtype(self, tagname):
786 '''
789 '''
787 return the type of the given tag. result can be:
790 return the type of the given tag. result can be:
788
791
789 'local' : a local tag
792 'local' : a local tag
790 'global' : a global tag
793 'global' : a global tag
791 None : tag does not exist
794 None : tag does not exist
792 '''
795 '''
793
796
794 return self._tagscache.tagtypes.get(tagname)
797 return self._tagscache.tagtypes.get(tagname)
795
798
796 def tagslist(self):
799 def tagslist(self):
797 '''return a list of tags ordered by revision'''
800 '''return a list of tags ordered by revision'''
798 if not self._tagscache.tagslist:
801 if not self._tagscache.tagslist:
799 l = []
802 l = []
800 for t, n in self.tags().iteritems():
803 for t, n in self.tags().iteritems():
801 l.append((self.changelog.rev(n), t, n))
804 l.append((self.changelog.rev(n), t, n))
802 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
805 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
803
806
804 return self._tagscache.tagslist
807 return self._tagscache.tagslist
805
808
806 def nodetags(self, node):
809 def nodetags(self, node):
807 '''return the tags associated with a node'''
810 '''return the tags associated with a node'''
808 if not self._tagscache.nodetagscache:
811 if not self._tagscache.nodetagscache:
809 nodetagscache = {}
812 nodetagscache = {}
810 for t, n in self._tagscache.tags.iteritems():
813 for t, n in self._tagscache.tags.iteritems():
811 nodetagscache.setdefault(n, []).append(t)
814 nodetagscache.setdefault(n, []).append(t)
812 for tags in nodetagscache.itervalues():
815 for tags in nodetagscache.itervalues():
813 tags.sort()
816 tags.sort()
814 self._tagscache.nodetagscache = nodetagscache
817 self._tagscache.nodetagscache = nodetagscache
815 return self._tagscache.nodetagscache.get(node, [])
818 return self._tagscache.nodetagscache.get(node, [])
816
819
817 def nodebookmarks(self, node):
820 def nodebookmarks(self, node):
818 """return the list of bookmarks pointing to the specified node"""
821 """return the list of bookmarks pointing to the specified node"""
819 marks = []
822 marks = []
820 for bookmark, n in self._bookmarks.iteritems():
823 for bookmark, n in self._bookmarks.iteritems():
821 if n == node:
824 if n == node:
822 marks.append(bookmark)
825 marks.append(bookmark)
823 return sorted(marks)
826 return sorted(marks)
824
827
825 def branchmap(self):
828 def branchmap(self):
826 '''returns a dictionary {branch: [branchheads]} with branchheads
829 '''returns a dictionary {branch: [branchheads]} with branchheads
827 ordered by increasing revision number'''
830 ordered by increasing revision number'''
828 branchmap.updatecache(self)
831 branchmap.updatecache(self)
829 return self._branchcaches[self.filtername]
832 return self._branchcaches[self.filtername]
830
833
831 @unfilteredmethod
834 @unfilteredmethod
832 def revbranchcache(self):
835 def revbranchcache(self):
833 if not self._revbranchcache:
836 if not self._revbranchcache:
834 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
837 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
835 return self._revbranchcache
838 return self._revbranchcache
836
839
837 def branchtip(self, branch, ignoremissing=False):
840 def branchtip(self, branch, ignoremissing=False):
838 '''return the tip node for a given branch
841 '''return the tip node for a given branch
839
842
840 If ignoremissing is True, then this method will not raise an error.
843 If ignoremissing is True, then this method will not raise an error.
841 This is helpful for callers that only expect None for a missing branch
844 This is helpful for callers that only expect None for a missing branch
842 (e.g. namespace).
845 (e.g. namespace).
843
846
844 '''
847 '''
845 try:
848 try:
846 return self.branchmap().branchtip(branch)
849 return self.branchmap().branchtip(branch)
847 except KeyError:
850 except KeyError:
848 if not ignoremissing:
851 if not ignoremissing:
849 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
852 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
850 else:
853 else:
851 pass
854 pass
852
855
853 def lookup(self, key):
856 def lookup(self, key):
854 return self[key].node()
857 return self[key].node()
855
858
856 def lookupbranch(self, key, remote=None):
859 def lookupbranch(self, key, remote=None):
857 repo = remote or self
860 repo = remote or self
858 if key in repo.branchmap():
861 if key in repo.branchmap():
859 return key
862 return key
860
863
861 repo = (remote and remote.local()) and remote or self
864 repo = (remote and remote.local()) and remote or self
862 return repo[key].branch()
865 return repo[key].branch()
863
866
864 def known(self, nodes):
867 def known(self, nodes):
865 cl = self.changelog
868 cl = self.changelog
866 nm = cl.nodemap
869 nm = cl.nodemap
867 filtered = cl.filteredrevs
870 filtered = cl.filteredrevs
868 result = []
871 result = []
869 for n in nodes:
872 for n in nodes:
870 r = nm.get(n)
873 r = nm.get(n)
871 resp = not (r is None or r in filtered)
874 resp = not (r is None or r in filtered)
872 result.append(resp)
875 result.append(resp)
873 return result
876 return result
874
877
875 def local(self):
878 def local(self):
876 return self
879 return self
877
880
878 def publishing(self):
881 def publishing(self):
879 # it's safe (and desirable) to trust the publish flag unconditionally
882 # it's safe (and desirable) to trust the publish flag unconditionally
880 # so that we don't finalize changes shared between users via ssh or nfs
883 # so that we don't finalize changes shared between users via ssh or nfs
881 return self.ui.configbool('phases', 'publish', True, untrusted=True)
884 return self.ui.configbool('phases', 'publish', True, untrusted=True)
882
885
883 def cancopy(self):
886 def cancopy(self):
884 # so statichttprepo's override of local() works
887 # so statichttprepo's override of local() works
885 if not self.local():
888 if not self.local():
886 return False
889 return False
887 if not self.publishing():
890 if not self.publishing():
888 return True
891 return True
889 # if publishing we can't copy if there is filtered content
892 # if publishing we can't copy if there is filtered content
890 return not self.filtered('visible').changelog.filteredrevs
893 return not self.filtered('visible').changelog.filteredrevs
891
894
892 def shared(self):
895 def shared(self):
893 '''the type of shared repository (None if not shared)'''
896 '''the type of shared repository (None if not shared)'''
894 if self.sharedpath != self.path:
897 if self.sharedpath != self.path:
895 return 'store'
898 return 'store'
896 return None
899 return None
897
900
898 def join(self, f, *insidef):
901 def join(self, f, *insidef):
899 return self.vfs.join(os.path.join(f, *insidef))
902 return self.vfs.join(os.path.join(f, *insidef))
900
903
901 def wjoin(self, f, *insidef):
904 def wjoin(self, f, *insidef):
902 return self.vfs.reljoin(self.root, f, *insidef)
905 return self.vfs.reljoin(self.root, f, *insidef)
903
906
904 def file(self, f):
907 def file(self, f):
905 if f[0] == '/':
908 if f[0] == '/':
906 f = f[1:]
909 f = f[1:]
907 return filelog.filelog(self.svfs, f)
910 return filelog.filelog(self.svfs, f)
908
911
909 def changectx(self, changeid):
912 def changectx(self, changeid):
910 return self[changeid]
913 return self[changeid]
911
914
    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it by itself, as it
            # requires access to the parents' manifests. Preserve them only
            # for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

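    # Hedged sketch: the repository is also subscriptable, so these are
    # equivalent ways to reach contexts (revision 0 and the path are
    # hypothetical):
    #
    #     ctx = repo.changectx(0)                  # same as repo[0]
    #     fctx = repo.filectx('a.txt', changeid=0)
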
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

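    # Hedged example of the configuration this parses: filter patterns live
    # in the hgrc section named by ``filter`` ('encode' or 'decode'), mapping
    # a file pattern to a registered data filter or a shell command; '!'
    # disables a pattern. Along the lines of the hgrc documentation:
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = pipe: gzip
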
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

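    # Hedged sketch: ``flags`` is the manifest flag string, where 'l' marks
    # a symlink and 'x' an executable file. A hypothetical caller copying a
    # file context into the working directory would do:
    #
    #     repo.wwrite(fctx.path(), fctx.data(), fctx.flags())
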
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

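    # Hedged usage sketch: a transaction is only valid under the store lock,
    # and must be closed on success or released on failure. A hypothetical
    # caller looks like:
    #
    #     with repo.lock():
    #         tr = repo.transaction('my-operation')
    #         try:
    #             ...            # write store data through tr
    #             tr.close()
    #         finally:
    #             tr.release()   # no-op after close(); rolls back otherwise
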
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

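    # Hedged note: this is the backend of the 'hg recover' command that the
    # abandoned-transaction hint in transaction() points users at. A
    # hypothetical programmatic caller:
    #
    #     ok = repo.recover()   # True if an interrupted journal was rolled back
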
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

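    # Hedged example: the retry branch above honours the ui.timeout setting
    # (600 seconds if unset), so the user-facing knob is an hgrc entry like:
    #
    #     [ui]
    #     timeout = 30
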
    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

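    # Hedged ordering sketch, following the docstrings here: take wlock
    # before lock to avoid the dead-lock hazard, and release in reverse
    # order. The rollback() method above uses this exact shape:
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         ...   # mutate the working copy and the store
    #     finally:
    #         release(lock, wlock)
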
    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

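    # Hedged illustration: when copy data is recorded above, the new filelog
    # revision carries metadata of roughly this shape (values hypothetical):
    #
    #     meta = {'copy': 'foo', 'copyrev': '<40-hex-char source node>'}
    #
    # and its first parent is set to nullid so readers know to consult the
    # copy source instead.
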
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may already have been stripped by the time
            # the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

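    # Hedged usage sketch: ``commit`` takes its own wlock/lock, so a caller
    # can invoke it directly (arguments hypothetical):
    #
    #     node = repo.commit(text='fix a bug', user='alice')
    #     if node is None:
    #         pass  # nothing to commit, and empty commits were not allowed
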
1693 @unfilteredmethod
1696 @unfilteredmethod
1694 def commitctx(self, ctx, error=False):
1697 def commitctx(self, ctx, error=False):
1695 """Add a new revision to current repository.
1698 """Add a new revision to current repository.
1696 Revision information is passed via the context argument.
1699 Revision information is passed via the context argument.
1697 """
1700 """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

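For orientation, commitctx() can also be driven directly with an in-memory
context instead of going through commit(). A minimal sketch, assuming an
existing repository at a hypothetical path; the memfilectx/memctx signatures
have varied across Mercurial releases, so treat the calls as illustrative::

    from mercurial import context, hg, ui as uimod

    u = uimod.ui()                            # ui construction varies by version
    repo = hg.repository(u, '/path/to/repo')  # hypothetical path

    def getfilectx(repo, memctx, path):
        # illustrative content for every committed path
        return context.memfilectx(path, 'hello\n')

    p1 = repo['.']
    mctx = context.memctx(repo, (p1.node(), None), 'example commit',
                          ['hello.txt'], getfilectx,
                          user='editor <editor@example.com>')
    node = repo.commitctx(mctx)
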
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

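The two methods above are meant to bracket a destructive operation; a
schematic of the expected calling pattern (roughly what a strip does),
with the actual revlog surgery elided::

    lock = repo.lock()
    try:
        repo.destroying()   # flush in-memory state such as the phasecache
        # ... truncate revlogs / remove the stripped revisions here ...
        repo.destroyed()    # repair phase/branch caches, invalidate the repo
    finally:
        lock.release()
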
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

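A hedged usage sketch: listing the tracked Python files of the working
directory's parent with a matcher (the pattern is illustrative)::

    from mercurial import match as matchmod

    m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    for f in repo.walk(m, node='.'):
        repo.ui.write(f + '\n')
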
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

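The returned status object exposes the usual file lists as attributes; a
small sketch of comparing the working directory against its parent::

    st = repo.status()            # defaults: node1='.', node2=None (wdir)
    for f in st.modified:
        repo.ui.write('M %s\n' % f)
    for f in st.added:
        repo.ui.write('A %s\n' % f)
    for f in st.removed:
        repo.ui.write('R %s\n' % f)
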
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

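branchheads() yields binary nodes, newest first; a brief sketch of printing
the open and closed heads of the 'default' branch::

    from mercurial.node import short

    for h in repo.branchheads('default', closed=True):
        repo.ui.write('%s\n' % short(h))
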
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

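The inner loop samples ancestors at exponentially growing distances from
top (1, 2, 4, 8, ... steps), which is what lets the legacy discovery
protocol narrow down long ancestry chains cheaply. A self-contained toy
over a linear chain shows the pattern::

    def sample(parent, top, bottom):
        # mirrors the loop above, with a plain parent function
        n, picked, i, f = top, [], 0, 1
        while n != bottom and n is not None:
            p = parent(n)
            if i == f:
                picked.append(n)
                f *= 2
            n = p
            i += 1
        return picked

    chain = {i: (i - 1 if i else None) for i in range(10)}  # 9 -> ... -> 0
    print(sample(chain.get, 9, 0))  # [8, 7, 5, 1]: 1, 2, 4, 8 steps from 9
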
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing) before pushing changesets.
        """
        return util.hooks()

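A hedged sketch of how an extension might register one of these hooks;
util.hooks.add() takes a source name and a callable, and each callable
receives the pushop (the extension name and policy below are made up)::

    def reposetup(ui, repo):
        def checkoutgoing(pushop):
            # pushop carries .repo, .remote and .outgoing
            n = len(pushop.outgoing.missing)
            if n > 100:   # illustrative threshold
                pushop.repo.ui.warn('pushing %d changesets\n' % n)
        repo.prepushoutgoinghooks.add('myext', checkoutgoing)
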
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

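Bookmarks and phases are the stock pushkey namespaces; values at this layer
are plain strings (hex nodes for bookmarks). A sketch of reading and
conditionally moving a bookmark (the bookmark name is illustrative)::

    marks = repo.listkeys('bookmarks')     # {name: hex node}
    old = marks.get('stable', '')
    ok = repo.pushkey('bookmarks', 'stable', old, repo['tip'].hex())
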
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

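A sketch of typical use by a command that wants to keep a draft message
around for later recovery::

    relpath = repo.savecommitmessage('WIP: draft message\n')
    repo.ui.status('message saved to %s\n' % relpath)
    # the file usually lands at .hg/last-message.txt
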
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

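aftertrans() and undoname() cooperate: journal files written during a
transaction become the corresponding undo files once it closes. Worked
examples (the repository path is illustrative)::

    print(undoname('/repo/.hg/store/journal'))
    # /repo/.hg/store/undo
    print(undoname('/repo/.hg/store/journal.phaseroots'))
    # /repo/.hg/store/undo.phaseroots
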
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
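
Since the docstring invites wrapping, a hedged sketch of an extension adding
its own requirement at repo-creation time (the requirement string and
extension hook names below are hypothetical)::

    from mercurial import extensions, localrepo

    def _requirements(orig, repo):
        reqs = orig(repo)
        reqs.add('exp-myfeature')   # hypothetical requirement string
        return reqs

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _requirements)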