tags: deprecated 'repo.tag'...
Pierre-Yves David - r31672:e6fd7930 default
@@ -1,1983 +1,1984 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repoview,
    revset,
    revsetlang,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on the repo is for logic that should be unfiltered
    """

    def join(self, obj, fname):
        return obj.vfs.join(fname)
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

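# A minimal usage sketch (illustrative, not part of this changeset): a
# method decorated with ``unfilteredmethod`` always operates on the
# unfiltered repository, regardless of the view it is called through.
#
#     @unfilteredmethod
#     def destroyed(repo):
#         # ``repo`` here is repo.unfiltered(), even when invoked as
#         # repo.filtered('visible').destroyed()
#         ...
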
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'relshared', 'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = vfsmod.vfs(self.path)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, vfsmod.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    @property
    def wopener(self):
        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
        return self.wvfs

    @property
    def opener(self):
        self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
        return self.vfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas',
                                                   False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

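    # A sketch of the experimental hgrc knobs read above (values are
    # illustrative, not recommendations):
    #
    #     [format]
    #     chunkcachesize = 65536
    #     maxchainlen = 1000
    #     manifestcachesize = 4
    #     aggressivemergedeltas = True
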
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class filteredrepo(repoview.repoview, self.unfiltered().__class__):
            pass
        return filteredrepo(self, name)

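    # Illustrative only (not part of this changeset): ``name`` is one of the
    # filter names registered in repoview, e.g. 'visible' hides hidden
    # (obsolete) changesets and 'served' additionally hides secret ones:
    #
    #     repo.filtered('visible')   # what most local commands operate on
    #     repo.filtered('served')    # what is exposed to peers
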
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if txnutil.mayhavepending(self.root):
            c.readpending('00changelog.i.a')
        return c

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

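    # Illustrative indexing sketch (not part of this changeset):
    #
    #     repo[None]      # workingctx for the working directory
    #     repo['tip']     # changectx for the tip changeset
    #     repo[0:3]       # list of changectxs, filtered revs skipped
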
    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

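    # A usage sketch (illustrative): the %-escapes keep caller data out of
    # the revset grammar, e.g. %s for a string and %ld for a list of revs:
    #
    #     repo.revs('branch(%s) and not obsolete()', 'default')
    #     repo.revs('%ld and merge()', [5, 7, 42])
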
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

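    # Illustrative: same revset language as revs(), but yielding contexts:
    #
    #     for ctx in repo.set('ancestors(%s)', 'tip'):
    #         repo.ui.write('%s\n' % ctx.hex())
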
    def anyrevs(self, specs, user=False):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self)
        else:
            m = revset.matchany(None, specs)
        return m(self)

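    # Illustrative: the specs are OR-ed together, and user=True lets
    # configured [revsetalias] entries expand:
    #
    #     repo.anyrevs(['draft()', 'secret()'], user=True)
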
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

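    # Illustrative: keyword arguments become HG_* environment variables for
    # shell hooks; throw=True aborts the operation if the hook fails
    # (``node`` below is a hypothetical binary node from the caller):
    #
    #     repo.hook('pretxncommit', throw=True, node=hex(node))
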
    def tag(self, names, node, message, local, user, date, editor=False):
        self.ui.deprecwarn("use 'tagsmod.tag' instead of 'repo.tag'", '4.2')
        tagsmod.tag(self, names, node, message, local, user, date,
                    editor=editor)

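    # The deprecwarn line above is the one line this changeset adds: callers
    # should migrate from the repo method to the tags module, e.g. (sketch
    # with illustrative values):
    #
    #     tagsmod.tag(repo, ['v1.0'], node, 'Added tag v1.0', False,
    #                 'user@example.com', None)
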
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

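    # Illustrative lookups against the mapping built by _findtags()
    # (hypothetical tag names):
    #
    #     repo.tagtype('v1.0')   # -> 'global' if recorded in .hgtags
    #     repo.tagtype('wip')    # -> 'local' if only in .hg/localtags
    #     repo.tagtype('nope')   # -> None
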
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

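    # Illustrative: the returned cache behaves like the documented dict:
    #
    #     for branch, heads in repo.branchmap().iteritems():
    #         repo.ui.write('%s has %d head(s)\n' % (branch, len(heads)))
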
774 @unfilteredmethod
775 @unfilteredmethod
775 def revbranchcache(self):
776 def revbranchcache(self):
776 if not self._revbranchcache:
777 if not self._revbranchcache:
777 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
778 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
778 return self._revbranchcache
779 return self._revbranchcache
779
780
780 def branchtip(self, branch, ignoremissing=False):
781 def branchtip(self, branch, ignoremissing=False):
781 '''return the tip node for a given branch
782 '''return the tip node for a given branch
782
783
783 If ignoremissing is True, then this method will not raise an error.
784 If ignoremissing is True, then this method will not raise an error.
784 This is helpful for callers that only expect None for a missing branch
785 This is helpful for callers that only expect None for a missing branch
785 (e.g. namespace).
786 (e.g. namespace).
786
787
787 '''
788 '''
788 try:
789 try:
789 return self.branchmap().branchtip(branch)
790 return self.branchmap().branchtip(branch)
790 except KeyError:
791 except KeyError:
791 if not ignoremissing:
792 if not ignoremissing:
792 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
793 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
793 else:
794 else:
794 pass
795 pass
795
796
796 def lookup(self, key):
797 def lookup(self, key):
797 return self[key].node()
798 return self[key].node()
798
799
799 def lookupbranch(self, key, remote=None):
800 def lookupbranch(self, key, remote=None):
800 repo = remote or self
801 repo = remote or self
801 if key in repo.branchmap():
802 if key in repo.branchmap():
802 return key
803 return key
803
804
804 repo = (remote and remote.local()) and remote or self
805 repo = (remote and remote.local()) and remote or self
805 return repo[key].branch()
806 return repo[key].branch()
806
807
807 def known(self, nodes):
808 def known(self, nodes):
808 cl = self.changelog
809 cl = self.changelog
809 nm = cl.nodemap
810 nm = cl.nodemap
810 filtered = cl.filteredrevs
811 filtered = cl.filteredrevs
811 result = []
812 result = []
812 for n in nodes:
813 for n in nodes:
813 r = nm.get(n)
814 r = nm.get(n)
814 resp = not (r is None or r in filtered)
815 resp = not (r is None or r in filtered)
815 result.append(resp)
816 result.append(resp)
816 return result
817 return result
817
818
818 def local(self):
819 def local(self):
819 return self
820 return self
820
821
821 def publishing(self):
822 def publishing(self):
822 # it's safe (and desirable) to trust the publish flag unconditionally
823 # it's safe (and desirable) to trust the publish flag unconditionally
823 # so that we don't finalize changes shared between users via ssh or nfs
824 # so that we don't finalize changes shared between users via ssh or nfs
824 return self.ui.configbool('phases', 'publish', True, untrusted=True)
825 return self.ui.configbool('phases', 'publish', True, untrusted=True)
825
826
826 def cancopy(self):
827 def cancopy(self):
827 # so statichttprepo's override of local() works
828 # so statichttprepo's override of local() works
828 if not self.local():
829 if not self.local():
829 return False
830 return False
830 if not self.publishing():
831 if not self.publishing():
831 return True
832 return True
832 # if publishing we can't copy if there is filtered content
833 # if publishing we can't copy if there is filtered content
833 return not self.filtered('visible').changelog.filteredrevs
834 return not self.filtered('visible').changelog.filteredrevs
834
835
835 def shared(self):
836 def shared(self):
836 '''the type of shared repository (None if not shared)'''
837 '''the type of shared repository (None if not shared)'''
837 if self.sharedpath != self.path:
838 if self.sharedpath != self.path:
838 return 'store'
839 return 'store'
839 return None
840 return None
840
841
841 def join(self, f, *insidef):
842 def join(self, f, *insidef):
842 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
843 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
843 return self.vfs.join(os.path.join(f, *insidef))
844 return self.vfs.join(os.path.join(f, *insidef))
844
845
845 def wjoin(self, f, *insidef):
846 def wjoin(self, f, *insidef):
846 return self.vfs.reljoin(self.root, f, *insidef)
847 return self.vfs.reljoin(self.root, f, *insidef)
847
848
848 def file(self, f):
849 def file(self, f):
849 if f[0] == '/':
850 if f[0] == '/':
850 f = f[1:]
851 f = f[1:]
851 return filelog.filelog(self.svfs, f)
852 return filelog.filelog(self.svfs, f)
852
853
853 def changectx(self, changeid):
854 def changectx(self, changeid):
854 return self[changeid]
855 return self[changeid]
855
856
856 def setparents(self, p1, p2=nullid):
857 def setparents(self, p1, p2=nullid):
857 self.dirstate.beginparentchange()
858 self.dirstate.beginparentchange()
858 copies = self.dirstate.setparents(p1, p2)
859 copies = self.dirstate.setparents(p1, p2)
859 pctx = self[p1]
860 pctx = self[p1]
860 if copies:
861 if copies:
861 # Adjust copy records, the dirstate cannot do it, it
862 # Adjust copy records, the dirstate cannot do it, it
862 # requires access to parents manifests. Preserve them
863 # requires access to parents manifests. Preserve them
863 # only for entries added to first parent.
864 # only for entries added to first parent.
864 for f in copies:
865 for f in copies:
865 if f not in pctx and copies[f] in pctx:
866 if f not in pctx and copies[f] in pctx:
866 self.dirstate.copy(copies[f], f)
867 self.dirstate.copy(copies[f], f)
867 if p2 == nullid:
868 if p2 == nullid:
868 for f, s in sorted(self.dirstate.copies().items()):
869 for f, s in sorted(self.dirstate.copies().items()):
869 if f not in pctx and s not in pctx:
870 if f not in pctx and s not in pctx:
870 self.dirstate.copy(None, f)
871 self.dirstate.copy(None, f)
871 self.dirstate.endparentchange()
872 self.dirstate.endparentchange()
872
873
873 def filectx(self, path, changeid=None, fileid=None):
874 def filectx(self, path, changeid=None, fileid=None):
874 """changeid can be a changeset revision, node, or tag.
875 """changeid can be a changeset revision, node, or tag.
875 fileid can be a file revision or node."""
876 fileid can be a file revision or node."""
876 return context.filectx(self, path, changeid, fileid)
877 return context.filectx(self, path, changeid, fileid)
877
878
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
        return self.wvfs(f, mode)

    def _link(self, f):
        self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
                           '4.0')
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

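    # For reference, _loadfilter reads pattern/command pairs from config
    # sections such as [encode] and [decode]; a hypothetical hgrc entry:
    #
    #     [encode]
    #     **.txt = dos2unix
    #
    # A command of '!' disables the pattern (see the loop above); otherwise
    # the command is matched against registered data filters and falls back
    # to a shell command run through util.filter.
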
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

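    # Sketch of a data filter as _filter will invoke it (hypothetical name;
    # filters lacking **kwargs are wrapped by _loadfilter above):
    #
    #     def myfilter(s, params, ui=None, repo=None, filename=None):
    #         return s  # transform and return the file data
    #
    #     repo.adddatafilter('myfilter:', myfilter)
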
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

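    # Flag handling above in a nutshell: '' writes a regular file, 'x' also
    # sets the executable bit, and 'l' creates a symlink pointing at ``data``.
    # Hypothetical calls:
    #
    #     repo.wwrite('foo.txt', data, '')
    #     repo.wwrite('run.sh', data, 'x')
    #     repo.wwrite('link', 'target', 'l')
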
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when closing the transaction if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

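    # Typical caller pattern for transaction() (an illustrative sketch, not
    # code from this module); the store lock must already be held:
    #
    #     with repo.lock():
    #         tr = repo.transaction('my-operation')  # hypothetical desc
    #         try:
    #             # ... mutate the store ...
    #             tr.close()    # commit the transaction
    #         finally:
    #             tr.release()  # rolls back if close() was never reached
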
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

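    # undofiles() maps each journal file through undoname(), so e.g.
    # 'journal.dirstate' becomes 'undo.dirstate'; these are the files that
    # rollback() below consults.
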
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

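    # recover() backs the 'hg recover' command: on success the interrupted
    # transaction's journal has been rolled back and caches invalidated.
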
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

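    # Lock-ordering sketch (illustrative): when both locks are needed,
    # acquire wlock first, then lock, and release in the reverse order:
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             ...
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()
    #
    # or, equivalently, release(lock, wlock) as done elsewhere in this file.
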
    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may have been stripped before the hook
            # is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

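    # Minimal usage sketch for commit() with hypothetical values; it returns
    # the new node, or None when there is nothing to commit (see
    # ui.allowemptycommit above):
    #
    #     node = repo.commit(text='fix the frobnicator',
    #                        user='alice <alice@example.com>')
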
1641 @unfilteredmethod
1642 @unfilteredmethod
1642 def commitctx(self, ctx, error=False):
1643 def commitctx(self, ctx, error=False):
1643 """Add a new revision to current repository.
1644 """Add a new revision to current repository.
1644 Revision information is passed via the context argument.
1645 Revision information is passed via the context argument.
1645 """
1646 """
1646
1647
1647 tr = None
1648 tr = None
1648 p1, p2 = ctx.p1(), ctx.p2()
1649 p1, p2 = ctx.p1(), ctx.p2()
1649 user = ctx.user()
1650 user = ctx.user()
1650
1651
1651 lock = self.lock()
1652 lock = self.lock()
1652 try:
1653 try:
1653 tr = self.transaction("commit")
1654 tr = self.transaction("commit")
1654 trp = weakref.proxy(tr)
1655 trp = weakref.proxy(tr)
1655
1656
1656 if ctx.manifestnode():
1657 if ctx.manifestnode():
1657 # reuse an existing manifest revision
1658 # reuse an existing manifest revision
1658 mn = ctx.manifestnode()
1659 mn = ctx.manifestnode()
1659 files = ctx.files()
1660 files = ctx.files()
1660 elif ctx.files():
1661 elif ctx.files():
1661 m1ctx = p1.manifestctx()
1662 m1ctx = p1.manifestctx()
1662 m2ctx = p2.manifestctx()
1663 m2ctx = p2.manifestctx()
1663 mctx = m1ctx.copy()
1664 mctx = m1ctx.copy()
1664
1665
1665 m = mctx.read()
1666 m = mctx.read()
1666 m1 = m1ctx.read()
1667 m1 = m1ctx.read()
1667 m2 = m2ctx.read()
1668 m2 = m2ctx.read()
1668
1669
1669 # check in files
1670 # check in files
1670 added = []
1671 added = []
1671 changed = []
1672 changed = []
1672 removed = list(ctx.removed())
1673 removed = list(ctx.removed())
1673 linkrev = len(self)
1674 linkrev = len(self)
1674 self.ui.note(_("committing files:\n"))
1675 self.ui.note(_("committing files:\n"))
1675 for f in sorted(ctx.modified() + ctx.added()):
1676 for f in sorted(ctx.modified() + ctx.added()):
1676 self.ui.note(f + "\n")
1677 self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit to the proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

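    # The 'pretxncommit' hook used above fires while the transaction is
    # still open, so a failing hook (throw=True) aborts the commit and the
    # finally clause releases the transaction. A minimal, hypothetical hgrc
    # wiring for such a hook ('checkmsg' and 'myhooks' are made-up names):
    #
    #   [hooks]
    #   pretxncommit.checkmsg = python:myhooks.checkmsg
    #
    # A Python hook receives (ui, repo, hooktype, **kwargs) with node,
    # parent1 and parent2 among the kwargs; returning a true value rejects
    # the commit.
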
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

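    # Sketch of the destruction life cycle implemented by destroying() and
    # destroyed() above, as a destructive caller such as strip is expected
    # to use it (simplified; the real strip logic lives in repair.py):
    #
    #   lock = repo.lock()
    #   try:
    #       repo.destroying()        # flush pending in-memory state
    #       ...physically remove the stripped revisions...
    #       repo.destroyed()         # fix caches, then invalidate the repo
    #   finally:
    #       lock.release()
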
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

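    # A minimal usage sketch for walk() (hypothetical caller code; matchmod
    # is mercurial.match, imported at the top of this module):
    #
    #   m = matchmod.always(repo.root, '')
    #   for f in repo.walk(m, node='.'):
    #       repo.ui.write('%s\n' % f)
    #
    # node=None walks the working directory instead of a changeset.
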
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

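    # status() returns a status tuple whose fields are lists of file names;
    # a small usage sketch (hypothetical caller code):
    #
    #   st = repo.status(unknown=True)
    #   for f in st.modified:
    #       repo.ui.write('M %s\n' % f)
    #   for f in st.unknown:
    #       repo.ui.write('? %s\n' % f)
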
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

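    # E.g. repo.branchheads('default') returns the open heads of the
    # 'default' branch, newest first; passing closed=True also includes
    # heads that were closed with 'hg commit --close-branch'.
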
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

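    # branches() above backs the legacy 'branches' wire-protocol command:
    # for each starting node it follows first parents until it reaches a
    # merge or a root, returning (start, end-of-chain, p1, p2) tuples that
    # the old discovery algorithm walks.
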
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

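    # between() samples each first-parent chain from 'top' towards 'bottom'
    # at exponentially growing distances (1, 2, 4, 8, ...). The legacy
    # 'between' wire-protocol command uses this so that old-style discovery
    # can close in on a common ancestor in O(log n) round trips.
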
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

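    # An extension could register an outgoing check roughly like this
    # (hypothetical names; each hook is invoked with the pushop):
    #
    #   def checkoutgoing(pushop):
    #       if len(pushop.outgoing.missing) > 1000:
    #           raise error.Abort(_('refusing to push %d changesets')
    #                             % len(pushop.outgoing.missing))
    #   repo.prepushoutgoinghooks.add('myext', checkoutgoing)
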
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

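    # pushkey is the generic key/value mechanism behind bookmark and phase
    # exchange. A sketch of moving a bookmark through it (values are hex
    # node strings; an empty 'old' value means the key is being created):
    #
    #   ok = repo.pushkey('bookmarks', 'mybook', '', hex(newnode))
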
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

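    # For example, repo.listkeys('namespaces') enumerates the available
    # pushkey namespaces, while repo.listkeys('bookmarks') returns a
    # {name: hexnode} mapping.
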
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

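# aftertrans() above is installed as the transaction's post-close callback:
# it renames the journal files to their 'undo' counterparts (see undoname
# below), which is what later makes 'hg rollback' possible.
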
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
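
# As the docstring above notes, an extension can wrap newreporequirements()
# to add a custom requirement to freshly created repositories (a sketch
# with hypothetical names):
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       reqs.add('myext-requirement')
#       return reqs
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)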