localrepo: avoid unnecessary conversion from node to rev...
Stanislau Hlebik
r30875:1791be8a default
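
The hunk below is unchanged context from mercurial/localrepo.py; the old and new sides of every line shown here match, so the node-to-rev cleanup named in the title lands beyond this excerpt. As a minimal sketch of the pattern such a cleanup targets (illustrative helper names, not the actual hunk), assuming `repo` is a localrepository:

    # Converting a node to a rev just to answer a yes/no question costs a
    # revlog lookup plus exception handling:
    def hasnode_slow(repo, node):
        try:
            repo.changelog.rev(node)      # node -> rev conversion
            return True
        except LookupError:               # error.LookupError is a KeyError
            return False

    # A single nodemap lookup answers the same question, mirroring the
    # known() method later in this file:
    def hasnode_fast(repo, node):
        return repo.changelog.nodemap.get(node) is not None
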
@@ -1,2026 +1,2030 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    util,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

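The three cache helpers above differ only in where the cached value lives and what invalidates it: repofilecache watches files under .hg/, storecache watches files under the store, and unfilteredpropertycache pins the value on the unfiltered repo so all filtered views share it. A minimal sketch of how they are applied (`somefile` and `_something` are made-up names; real usages such as @repofilecache('bookmarks', 'bookmarks.current') and @storecache('00changelog.i') appear later in this file):

    class somerepo(localrepository):
        @repofilecache('somefile')      # re-read when .hg/somefile changes
        def _something(self):
            return self.vfs.read('somefile')
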
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

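unfiltered() and filtered() are the two ends of the repoview mechanism: localpeer above wraps repo.filtered('served'), and cancopy() below consults self.filtered('visible'). A usage sketch, assuming an existing `repo` object:

    >>> served = repo.filtered('served')    # view without hidden/secret csets
    >>> served.unfiltered() is repo.unfiltered()
    True
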
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in encoding.environ:
            p = encoding.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

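The methods above give the repo its container behavior; a usage sketch, assuming an existing `repo` object:

    >>> len(repo)               # changeset count, via __len__
    >>> ctx = repo['tip']       # __getitem__ returns a changectx
    >>> wctx = repo[None]       # None (or wdirrev) maps to the workingctx
    >>> 0 in repo               # __contains__ swallows RepoLookupError
    >>> revs = list(repo)       # __iter__ walks changelog revisions
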
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

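For example (a sketch; %s and %d are two of the escape types formatspec understands):

    >>> repo.revs('branch(%s)', 'default')    # %s escapes a string
    >>> repo.revs('%d::%d', 10, 20)           # %d escapes an integer rev
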
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

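A usage sketch for tag() (assumes `repo` exists, its working copy of .hgtags is clean, and passing None for user and date to pick up commit defaults):

    >>> node = repo['tip'].node()
    >>> repo.tag('v1.0', node, 'Added tag v1.0', False, None, None)
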
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

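A usage sketch for branchmap(), assuming the repo has the usual 'default' branch (the returned branchcache behaves like a dict of branch name to head nodes):

    >>> bm = repo.branchmap()
    >>> heads = bm['default']           # head nodes, ascending by revision
    >>> tip = bm.branchtip('default')   # same call branchtip() makes below
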
    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

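known() already follows the pattern from the commit title: membership is answered with one nodemap.get() per node, with no node-to-rev-to-node round trip and no exception handling. A sketch, assuming a non-empty `repo` (the second node is a deliberately unknown placeholder):

    >>> repo.known([repo['tip'].node(), '\x01' * 20])
    [True, False]
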
875 def local(self):
875 def local(self):
876 return self
876 return self
877
877
878 def publishing(self):
878 def publishing(self):
879 # it's safe (and desirable) to trust the publish flag unconditionally
879 # it's safe (and desirable) to trust the publish flag unconditionally
880 # so that we don't finalize changes shared between users via ssh or nfs
880 # so that we don't finalize changes shared between users via ssh or nfs
881 return self.ui.configbool('phases', 'publish', True, untrusted=True)
881 return self.ui.configbool('phases', 'publish', True, untrusted=True)
882
882
    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

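    # Illustrative calls (hypothetical values): changeid resolves through
    # the changelog, while fileid addresses the filelog directly:
    #
    #   repo.filectx('foo.py', changeid='tip')  # file as of the tip changeset
    #   repo.filectx('foo.py', fileid=0)        # first revision of the filelog
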
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

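    # Sketch of how an extension might use this hook (names are
    # illustrative): register a named data filter and reference it from
    # the [encode]/[decode] sections, which _loadfilter() above consults:
    #
    #   def uppercase(s, params, ui=None, repo=None, filename=None):
    #       return s.upper()
    #   repo.adddatafilter('uppercase:', uppercase)
    #
    #   # hgrc:
    #   # [encode]
    #   # **.txt = uppercase:
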
    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

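    # Illustrative calls (assuming 'repo' is an open localrepository):
    # the flags argument selects the on-disk representation:
    #
    #   repo.wwrite('script.sh', data, 'x')    # regular file with exec bit
    #   repo.wwrite('alias', 'target', 'l')    # symlink pointing at 'target'
    #   repo.wwrite('plain.txt', data, '')     # plain regular file
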
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic references between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when the transaction
                # closes if tr.addfilegenerator (via dirstate.write or so)
                # wasn't invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run.
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if the transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

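    # Minimal usage sketch (illustrative, mirroring commitctx() below):
    # a store transaction must be opened under the store lock, committed
    # with close(), and cleaned up with release():
    #
    #   with repo.lock():
    #       tr = repo.transaction('example')
    #       try:
    #           ...                    # write store data through tr
    #           tr.close()             # commit the transaction
    #       finally:
    #           tr.release()           # abort if close() was never reached
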
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

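    # Hedged example (assumed call site): this is the implementation behind
    # 'hg rollback'; a dry run reports what would be undone without
    # touching anything:
    #
    #   repo.rollback(dryrun=True)   # reports the pending undo, returns 0
    #                                # (warns and returns 1 without one)
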
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

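    # Illustrative use (hypothetical callback): txnclosehook in
    # transaction() above relies on this to fire the 'txnclose' hook only
    # once every lock is released:
    #
    #   repo._afterlock(lambda: repo.ui.status('fully unlocked\n'))
    #   # runs immediately when no lock is held, otherwise at release time
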
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a dead-lock; they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

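    # Ordering sketch (illustrative): when both locks are needed, take
    # 'wlock' before 'lock', as the docstrings above require:
    #
    #   with repo.wlock():        # non-store parts of .hg
    #       with repo.lock():     # .hg/store
    #           ...               # safe to modify both areas here
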
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

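    # Usage sketch (assumed call site, values are illustrative): commit()
    # takes its own locks and returns the new changeset node, or None when
    # there is nothing to commit:
    #
    #   node = repo.commit(text='example message', user='alice')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
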
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
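    # A minimal usage sketch (illustrative only; `repo`, the parent nodes and
    # the file name are hypothetical). In-memory commits build a memctx and
    # hand it to commitctx, which writes the filelogs, manifest and changelog:
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'new contents\n')
    #   mctx = context.memctx(repo, (p1node, p2node), 'commit message',
    #                         ['a.txt'], getfilectx, user='alice')
    #   newnode = repo.commitctx(mctx)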

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
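    # A minimal usage sketch (illustrative only; assumes an existing `repo`
    # object and the match API imported above as matchmod):
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo.walk(m):            # match against the working copy
    #       repo.ui.write(f + '\n')
    #   for f in repo.walk(m, node='.'):  # match against the '.' changeset
    #       repo.ui.write(f + '\n')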

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
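    # A minimal usage sketch (illustrative only): the result is a status
    # object whose attributes are lists of file names.
    #
    #   st = repo.status()           # working directory against '.'
    #   st = repo.status('.^', '.')  # between two changesets
    #   repo.ui.write('%r %r %r\n' % (st.modified, st.added, st.removed))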

    def heads(self, start=None):
        if start is None:
            headrevs = sorted(self.changelog.headrevs(), reverse=True)
            return [self.changelog.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)
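    # A minimal usage sketch (illustrative only). When start is None the fast
    # path above sorts integer revisions and converts each head to a node
    # exactly once, avoiding a node-to-rev lookup per head during the sort:
    #
    #   tips = repo.heads()                        # all heads, newest first
    #   tips = repo.heads(start=repo['.'].node())  # heads descending from '.'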

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
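    # A minimal usage sketch (illustrative only):
    #
    #   bh = repo.branchheads('default')               # open heads only
    #   bh = repo.branchheads('default', closed=True)  # include closed heads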

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
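    # Worked example (illustrative only): for each (top, bottom) pair the
    # loop follows first parents from top toward bottom and records the
    # nodes reached at steps 1, 2, 4, 8, ... (whenever i == f, with f
    # doubling), so the returned sample is logarithmic in the length of
    # the walk rather than linear:
    #
    #   repo.between([(topnode, bottomnode)])
    #   # -> [[node at step 1, node at step 2, node at step 4, ...]]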

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass
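    # A minimal extension sketch (hypothetical; illustrative only):
    #
    #   def checkpush(orig, repo, pushop):
    #       if pushop.force:
    #           repo.ui.warn('forced push\n')
    #       return orig(repo, pushop)
    #   extensions.wrapfunction(localrepo.localrepository, 'checkpush',
    #                           checkpush)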

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a pushop
        (which exposes repo, remote and outgoing) before changesets are
        pushed.
        """
        return util.hooks()
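    # A minimal usage sketch (hypothetical hook; illustrative only):
    #
    #   def checkoutgoing(pushop):
    #       if len(pushop.outgoing.missing) > 100:
    #           pushop.repo.ui.warn('pushing %d changesets\n'
    #                               % len(pushop.outgoing.missing))
    #   repo.prepushoutgoinghooks.add('myext', checkoutgoing)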

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
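    # A minimal usage sketch (illustrative only): moving a bookmark through
    # the pushkey protocol; old and new values are hex nodes ('' creates it).
    #
    #   ok = repo.pushkey('bookmarks', 'featureX', '', hex(newnode))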

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
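    # A minimal usage sketch (illustrative only):
    #
    #   repo.listkeys('namespaces')  # available pushkey namespaces
    #   repo.listkeys('bookmarks')   # {bookmark name: hex node, ...}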

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])
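    # A minimal usage sketch (illustrative only): the message lands in
    # .hg/last-message.txt and the repo-relative path is returned.
    #
    #   msgpath = repo.savecommitmessage('WIP: draft message\n')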

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
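# Worked example (illustrative only):
#
#   undoname('.hg/store/journal')             # -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots')  # -> '.hg/store/undo.phaseroots'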

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
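# A minimal configuration sketch (illustrative only): with the defaults above
# plus the setting below, a freshly created repo would typically carry
# {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta',
#  'exp-compression-zstd'}, assuming the zstd engine is available in
# util.compengines.
#
#   [experimental]
#   format.compression = zstd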