tags: do not feed dictionaries to 'findglobaltags'...
Pierre-Yves David
r31706:63d4deda default
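The change below is confined to localrepository._findtags in mercurial/localrepo.py: tagsmod.findglobaltags now allocates and returns the tag dictionaries itself instead of filling in dictionaries supplied by the caller. As a minimal sketch of the calling-convention change taken from the hunk below (free-standing ui/repo variables are hypothetical stand-ins for the self.ui/self arguments used inside the method):

    # Before this change: the caller allocated both dicts and
    # findglobaltags mutated them in place, so callers could pre-seed
    # them with entries.
    alltags = {}   # map tag name to (node, hist)
    tagtypes = {}  # map tag name to 'global' or 'local'
    tagsmod.findglobaltags(ui, repo, alltags, tagtypes)

    # After this change: findglobaltags builds and returns both dicts,
    # so dictionaries can no longer be fed to it.
    globaldata = tagsmod.findglobaltags(ui, repo)
    alltags = globaldata[0]   # map tag name to (node, hist)
    tagtypes = globaldata[1]  # map tag name to tag type

    # readlocaltags still layers local tags on top by mutating the
    # returned dicts in place.
    tagsmod.readlocaltags(ui, repo, alltags, tagtypes)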
@@ -1,1984 +1,1985 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repoview,
    revset,
    revsetlang,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def join(self, obj, fname):
        return obj.vfs.join(fname)
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'relshared', 'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = vfsmod.vfs(self.path)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, vfsmod.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    @property
    def wopener(self):
        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
        return self.wvfs

    @property
    def opener(self):
        self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
        return self.vfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        # $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class filteredrepo(repoview.repoview, self.unfiltered().__class__):
            pass
        return filteredrepo(self, name)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if txnutil.mayhavepending(self.root):
            c.readpending('00changelog.i.a')
        return c

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self)
        else:
            m = revset.matchany(None, specs)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    def tag(self, names, node, message, local, user, date, editor=False):
        self.ui.deprecwarn("use 'tagsmod.tag' instead of 'repo.tag'", '4.2')
        tagsmod.tag(self, names, node, message, local, user, date,
                    editor=editor)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

-        alltags = {} # map tag name to (node, hist)
-        tagtypes = {}

-        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
+        globaldata = tagsmod.findglobaltags(self.ui, self)
+        alltags = globaldata[0] # map tag name to (node, hist)
+        tagtypes = globaldata[1] # map tag name to tag type
+
714 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
715 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
715
716
716 # Build the return dicts. Have to re-encode tag names because
717 # Build the return dicts. Have to re-encode tag names because
717 # the tags module always uses UTF-8 (in order not to lose info
718 # the tags module always uses UTF-8 (in order not to lose info
718 # writing to the cache), but the rest of Mercurial wants them in
719 # writing to the cache), but the rest of Mercurial wants them in
719 # local encoding.
720 # local encoding.
720 tags = {}
721 tags = {}
721 for (name, (node, hist)) in alltags.iteritems():
722 for (name, (node, hist)) in alltags.iteritems():
722 if node != nullid:
723 if node != nullid:
723 tags[encoding.tolocal(name)] = node
724 tags[encoding.tolocal(name)] = node
724 tags['tip'] = self.changelog.tip()
725 tags['tip'] = self.changelog.tip()
725 tagtypes = dict([(encoding.tolocal(name), value)
726 tagtypes = dict([(encoding.tolocal(name), value)
726 for (name, value) in tagtypes.iteritems()])
727 for (name, value) in tagtypes.iteritems()])
727 return (tags, tagtypes)
728 return (tags, tagtypes)
728
729
729 def tagtype(self, tagname):
730 def tagtype(self, tagname):
730 '''
731 '''
731 return the type of the given tag. result can be:
732 return the type of the given tag. result can be:
732
733
733 'local' : a local tag
734 'local' : a local tag
734 'global' : a global tag
735 'global' : a global tag
735 None : tag does not exist
736 None : tag does not exist
736 '''
737 '''
737
738
738 return self._tagscache.tagtypes.get(tagname)
739 return self._tagscache.tagtypes.get(tagname)
739
740
740 def tagslist(self):
741 def tagslist(self):
741 '''return a list of tags ordered by revision'''
742 '''return a list of tags ordered by revision'''
742 if not self._tagscache.tagslist:
743 if not self._tagscache.tagslist:
743 l = []
744 l = []
744 for t, n in self.tags().iteritems():
745 for t, n in self.tags().iteritems():
745 l.append((self.changelog.rev(n), t, n))
746 l.append((self.changelog.rev(n), t, n))
746 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
747 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
747
748
748 return self._tagscache.tagslist
749 return self._tagscache.tagslist
749
750
750 def nodetags(self, node):
751 def nodetags(self, node):
751 '''return the tags associated with a node'''
752 '''return the tags associated with a node'''
752 if not self._tagscache.nodetagscache:
753 if not self._tagscache.nodetagscache:
753 nodetagscache = {}
754 nodetagscache = {}
754 for t, n in self._tagscache.tags.iteritems():
755 for t, n in self._tagscache.tags.iteritems():
755 nodetagscache.setdefault(n, []).append(t)
756 nodetagscache.setdefault(n, []).append(t)
756 for tags in nodetagscache.itervalues():
757 for tags in nodetagscache.itervalues():
757 tags.sort()
758 tags.sort()
758 self._tagscache.nodetagscache = nodetagscache
759 self._tagscache.nodetagscache = nodetagscache
759 return self._tagscache.nodetagscache.get(node, [])
760 return self._tagscache.nodetagscache.get(node, [])
760
761
761 def nodebookmarks(self, node):
762 def nodebookmarks(self, node):
762 """return the list of bookmarks pointing to the specified node"""
763 """return the list of bookmarks pointing to the specified node"""
763 marks = []
764 marks = []
764 for bookmark, n in self._bookmarks.iteritems():
765 for bookmark, n in self._bookmarks.iteritems():
765 if n == node:
766 if n == node:
766 marks.append(bookmark)
767 marks.append(bookmark)
767 return sorted(marks)
768 return sorted(marks)
768
769
769 def branchmap(self):
770 def branchmap(self):
770 '''returns a dictionary {branch: [branchheads]} with branchheads
771 '''returns a dictionary {branch: [branchheads]} with branchheads
771 ordered by increasing revision number'''
772 ordered by increasing revision number'''
772 branchmap.updatecache(self)
773 branchmap.updatecache(self)
773 return self._branchcaches[self.filtername]
774 return self._branchcaches[self.filtername]
774
775
775 @unfilteredmethod
776 @unfilteredmethod
776 def revbranchcache(self):
777 def revbranchcache(self):
777 if not self._revbranchcache:
778 if not self._revbranchcache:
778 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
779 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
779 return self._revbranchcache
780 return self._revbranchcache
780
781
781 def branchtip(self, branch, ignoremissing=False):
782 def branchtip(self, branch, ignoremissing=False):
782 '''return the tip node for a given branch
783 '''return the tip node for a given branch
783
784
784 If ignoremissing is True, then this method will not raise an error.
785 If ignoremissing is True, then this method will not raise an error.
785 This is helpful for callers that only expect None for a missing branch
786 This is helpful for callers that only expect None for a missing branch
786 (e.g. namespace).
787 (e.g. namespace).
787
788
788 '''
789 '''
789 try:
790 try:
790 return self.branchmap().branchtip(branch)
791 return self.branchmap().branchtip(branch)
791 except KeyError:
792 except KeyError:
792 if not ignoremissing:
793 if not ignoremissing:
793 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
794 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
794 else:
795 else:
795 pass
796 pass
796
797
797 def lookup(self, key):
798 def lookup(self, key):
798 return self[key].node()
799 return self[key].node()
799
800
800 def lookupbranch(self, key, remote=None):
801 def lookupbranch(self, key, remote=None):
801 repo = remote or self
802 repo = remote or self
802 if key in repo.branchmap():
803 if key in repo.branchmap():
803 return key
804 return key
804
805
805 repo = (remote and remote.local()) and remote or self
806 repo = (remote and remote.local()) and remote or self
806 return repo[key].branch()
807 return repo[key].branch()
807
808
808 def known(self, nodes):
809 def known(self, nodes):
809 cl = self.changelog
810 cl = self.changelog
810 nm = cl.nodemap
811 nm = cl.nodemap
811 filtered = cl.filteredrevs
812 filtered = cl.filteredrevs
812 result = []
813 result = []
813 for n in nodes:
814 for n in nodes:
814 r = nm.get(n)
815 r = nm.get(n)
815 resp = not (r is None or r in filtered)
816 resp = not (r is None or r in filtered)
816 result.append(resp)
817 result.append(resp)
817 return result
818 return result
818
819
819 def local(self):
820 def local(self):
820 return self
821 return self
821
822
822 def publishing(self):
823 def publishing(self):
823 # it's safe (and desirable) to trust the publish flag unconditionally
824 # it's safe (and desirable) to trust the publish flag unconditionally
824 # so that we don't finalize changes shared between users via ssh or nfs
825 # so that we don't finalize changes shared between users via ssh or nfs
825 return self.ui.configbool('phases', 'publish', True, untrusted=True)
826 return self.ui.configbool('phases', 'publish', True, untrusted=True)
826
827
827 def cancopy(self):
828 def cancopy(self):
828 # so statichttprepo's override of local() works
829 # so statichttprepo's override of local() works
829 if not self.local():
830 if not self.local():
830 return False
831 return False
831 if not self.publishing():
832 if not self.publishing():
832 return True
833 return True
833 # if publishing we can't copy if there is filtered content
834 # if publishing we can't copy if there is filtered content
834 return not self.filtered('visible').changelog.filteredrevs
835 return not self.filtered('visible').changelog.filteredrevs
835
836
836 def shared(self):
837 def shared(self):
837 '''the type of shared repository (None if not shared)'''
838 '''the type of shared repository (None if not shared)'''
838 if self.sharedpath != self.path:
839 if self.sharedpath != self.path:
839 return 'store'
840 return 'store'
840 return None
841 return None
841
842
842 def join(self, f, *insidef):
843 def join(self, f, *insidef):
843 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
844 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
844 return self.vfs.join(os.path.join(f, *insidef))
845 return self.vfs.join(os.path.join(f, *insidef))
845
846
846 def wjoin(self, f, *insidef):
847 def wjoin(self, f, *insidef):
847 return self.vfs.reljoin(self.root, f, *insidef)
848 return self.vfs.reljoin(self.root, f, *insidef)
848
849
849 def file(self, f):
850 def file(self, f):
850 if f[0] == '/':
851 if f[0] == '/':
851 f = f[1:]
852 f = f[1:]
852 return filelog.filelog(self.svfs, f)
853 return filelog.filelog(self.svfs, f)
853
854
854 def changectx(self, changeid):
855 def changectx(self, changeid):
855 return self[changeid]
856 return self[changeid]
856
857
857 def setparents(self, p1, p2=nullid):
858 def setparents(self, p1, p2=nullid):
858 self.dirstate.beginparentchange()
859 self.dirstate.beginparentchange()
859 copies = self.dirstate.setparents(p1, p2)
860 copies = self.dirstate.setparents(p1, p2)
860 pctx = self[p1]
861 pctx = self[p1]
861 if copies:
862 if copies:
862 # Adjust copy records, the dirstate cannot do it, it
863 # Adjust copy records, the dirstate cannot do it, it
863 # requires access to parents manifests. Preserve them
864 # requires access to parents manifests. Preserve them
864 # only for entries added to first parent.
865 # only for entries added to first parent.
865 for f in copies:
866 for f in copies:
866 if f not in pctx and copies[f] in pctx:
867 if f not in pctx and copies[f] in pctx:
867 self.dirstate.copy(copies[f], f)
868 self.dirstate.copy(copies[f], f)
868 if p2 == nullid:
869 if p2 == nullid:
869 for f, s in sorted(self.dirstate.copies().items()):
870 for f, s in sorted(self.dirstate.copies().items()):
870 if f not in pctx and s not in pctx:
871 if f not in pctx and s not in pctx:
871 self.dirstate.copy(None, f)
872 self.dirstate.copy(None, f)
872 self.dirstate.endparentchange()
873 self.dirstate.endparentchange()
873
874
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
        return self.wvfs(f, mode)

    def _link(self, f):
        self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
                           '4.0')
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

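    # Configuration sketch (illustrative): the 'encode'/'decode' pattern
    # lists loaded above come from hgrc sections of the same names; each
    # entry maps a file pattern to a shell command, or to a named filter
    # registered through adddatafilter(). A hypothetical hgrc that
    # decompresses on checkin and recompresses on checkout might read:
    #
    #     [encode]
    #     *.gz = pipe: gunzip     # applied by wread(), working dir -> store
    #
    #     [decode]
    #     *.gz = pipe: gzip       # applied by wwrite(), store -> working dir
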
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid a cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at transaction
                # close if tr.addfilegenerator (via dirstate.write or
                # so) isn't invoked while the transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for
        # streaming clones, this is not expected to break anything that
        # happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if the transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

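    # Usage sketch (illustrative, mirroring the pattern commit() uses later
    # in this file): a transaction is only valid while the store lock is
    # held, is closed on success, and is released unconditionally:
    #
    #     with repo.lock():
    #         tr = repo.transaction('my-operation')  # 'my-operation' is a
    #         try:                                   # hypothetical desc
    #             ...                                # mutate the store
    #             tr.close()                         # commit the transaction
    #         finally:
    #             tr.release()                       # rollback if not closed
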
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

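    # Illustrative call (sketch): recover() is the backend of the
    # 'hg recover' command suggested by the hint in transaction() above; it
    # replays the leftover 'journal' file. A script could attempt repair
    # after a crash with:
    #
    #     if not repo.recover():
    #         repo.ui.warn('nothing to recover\n')
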
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent the dirstateguard from overwriting the already
            # restored dirstate
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from
        # being invalidated.
        self.destroyed()
        return 0

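    # Illustrative call (sketch): this pair of methods backs 'hg rollback';
    # a dry run reports what would be undone without touching the store:
    #
    #     repo.rollback(dryrun=True)   # prints the 'rolled back to' message
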
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of the store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to a 600 second timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found
            callback()

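    # Illustrative sketch: commit() below registers its 'commit' hook runner
    # through this method so the hook only fires once the outermost lock is
    # released. A hypothetical callback would be queued the same way:
    #
    #     def notify():
    #         repo.ui.status('all repository locks released\n')
    #     repo._afterlock(notify)   # runs immediately if nothing is locked
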
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

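    # Ordering sketch (illustrative): per the docstrings above, callers
    # needing both locks must take wlock first and release it last, e.g.:
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()   # non-store parts of .hg
    #         lock = repo.lock()     # .hg/store
    #         ...                    # mutate working dir and store
    #     finally:
    #         release(lock, wlock)   # release() is lockmod.release
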
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            #  0 --- 1 --- 3   rev1 changes file foo
            #   \       /      rev2 renames foo to bar and changes it
            #    \- 2 -/       rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            #  0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /      merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4    as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

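    # Resulting metadata (sketch): when the rename branch above fires, the
    # new filelog revision records its copy source as metadata rather than
    # as a regular parent, roughly:
    #
    #     meta = {'copy': 'foo', 'copyrev': '<40-digit hex node>'}
    #     # ...with fparent1 == nullid and fparent2 == the other parent
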
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may have been stripped before the hook
            # is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

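    # Illustrative call (sketch): a minimal programmatic commit of all
    # working-directory changes, with hypothetical user/date values:
    #
    #     node = repo.commit(text='example message', user='alice <a@b.c>',
    #                        date='0 0')
    #     # returns the new changelog node, or None if there was nothing
    #     # to commit
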
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets. If a parent has a higher phase, the
                # resulting phase will be compliant anyway.
                #
                # if the minimal phase was 0 we don't need to retract
                # anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk from top toward bottom, sampling nodes at exponentially
            # growing distances (1, 2, 4, ...) so the result stays small
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

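    # Illustrative sketch (editor's note, not in the original source): for a
    # linear history n9 -> n8 -> ... -> n0, between([(n9, n0)]) samples the
    # chain at distances 1, 2, 4 and 8 from n9 and returns [[n8, n7, n5, n1]];
    # the exponential spacing keeps wire-protocol answers small while still
    # bracketing bottom.
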
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called before
        pushing changesets; each hook receives a pushop carrying the
        repo, remote and outgoing information.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

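    # Illustrative sketch (editor's note, not in the original source): a
    # bookmark update arriving over the wire boils down to something like
    #
    #   repo.pushkey('bookmarks', 'feature',
    #                old=hex(oldnode), new=hex(newnode))
    #
    # 'prepushkey' runs first and can abort the update; the 'pushkey' hook
    # is deferred via _afterlock so it fires only after the lock is released.
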
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
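
# Illustrative note (editor's addition, not in the original source): with a
# stock configuration of this era (usestore, usefncache and dotencode on,
# zlib compression, generaldelta enabled by default) the returned set is
# expected to be:
#
#   {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
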
@@ -1,680 +1,676 b''
# tags.py - read tag info from local repository
#
# Copyright 2009 Matt Mackall <mpm@selenic.com>
# Copyright 2009 Greg Ward <greg@gerg.ca>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# Currently this module only deals with reading and caching tags.
# Eventually, it could take care of updating (adding/removing/moving)
# tags too.

from __future__ import absolute_import

import errno

from .node import (
    bin,
    hex,
    nullid,
    short,
)
from .i18n import _
from . import (
    encoding,
    error,
    match as matchmod,
    scmutil,
    util,
)

# Tags computation can be expensive and caches exist to make it fast in
# the common case.
#
# The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
# each revision in the repository. The file is effectively an array of
# fixed length records. Read the docs for "hgtagsfnodescache" for technical
# details.
#
# The .hgtags filenode cache grows in proportion to the length of the
# changelog. The file is truncated when the changelog is stripped.
#
# The purpose of the filenode cache is to avoid the most expensive part
# of finding global tags, which is looking up the .hgtags filenode in the
# manifest for each head. This can take dozens or over 100ms for
# repositories with very large manifests. Multiplied by dozens or even
# hundreds of heads and there is a significant performance concern.
#
# There also exists a separate cache file for each repository filter.
# These "tags-*" files store information about the history of tags.
#
# The tags cache files consist of a cache validation line followed by
# a history of tags.
#
# The cache validation line has the format:
#
#   <tiprev> <tipnode> [<filteredhash>]
#
# <tiprev> is an integer revision and <tipnode> is a 40 character hex
# node for that changeset. These redundantly identify the repository
# tip from the time the cache was written. In addition, <filteredhash>,
# if present, is a 40 character hex hash of the contents of the filtered
# revisions for this filter. If the set of filtered revs changes, the
# hash will change and invalidate the cache.
#
# The history part of the tags cache consists of lines of the form:
#
#   <node> <tag>
#
# (This format is identical to that of .hgtags files.)
#
# <tag> is the tag name and <node> is the 40 character hex changeset
# the tag is associated with.
#
# Tags are written sorted by tag name.
#
# Tags associated with multiple changesets have an entry for each changeset.
# The most recent changeset (in terms of revlog ordering for the head
# setting it) for each tag is last.

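# Illustrative sketch (editor's addition, not in the original source):
# parsing the validation line described above, mirroring what
# _readtagcache does below:
#
#   parts = next(cachelines).split()
#   cacherev = int(parts[0])                  # tip revision number
#   cachenode = bin(parts[1])                 # binary tip node
#   cachehash = bin(parts[2]) if len(parts) > 2 else None  # filteredhash
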
-def findglobaltags(ui, repo, alltags, tagtypes):
-    '''Find global tags in a repo.
+def findglobaltags(ui, repo):
+    '''Find global tags in a repo: return (alltags, tagtypes)
 
     "alltags" maps tag name to (node, hist) 2-tuples.
 
     "tagtypes" maps tag name to tag type. Global tags always have the
     "global" tag type.
 
-    The "alltags" and "tagtypes" dicts are updated in place. Empty dicts
-    should be passed in.
-
     The tags cache is read and updated as a side-effect of calling.
     '''
-    # This is so we can be lazy and assume alltags contains only global
-    # tags when we pass it to _writetagcache().
-    assert len(alltags) == len(tagtypes) == 0, \
-           "findglobaltags() should be called first"
+    alltags = {}
+    tagtypes = {}
 
     (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
     if cachetags is not None:
         assert not shouldwrite
         # XXX is this really 100% correct? are there oddball special
         # cases where a global tag should outrank a local tag but won't,
         # because cachetags does not contain rank info?
         _updatetags(cachetags, 'global', alltags, tagtypes)
-        return
+        return alltags, tagtypes
 
     seen = set() # set of fnode
     fctx = None
     for head in reversed(heads): # oldest to newest
         assert head in repo.changelog.nodemap, \
             "tag cache returned bogus head %s" % short(head)
 
         fnode = tagfnode.get(head)
         if fnode and fnode not in seen:
             seen.add(fnode)
             if not fctx:
                 fctx = repo.filectx('.hgtags', fileid=fnode)
             else:
                 fctx = fctx.filectx(fnode)
 
             filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
             _updatetags(filetags, 'global', alltags, tagtypes)
 
     # and update the cache (if necessary)
     if shouldwrite:
         _writetagcache(ui, repo, valid, alltags)
+    return alltags, tagtypes

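# Illustrative sketch (editor's addition, not in the original source): with
# the new signature, callers receive fresh dicts instead of filling in empty
# ones they pass:
#
#   alltags, tagtypes = findglobaltags(ui, repo)
#   readlocaltags(ui, repo, alltags, tagtypes)  # locals still update in place
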
def readlocaltags(ui, repo, alltags, tagtypes):
    '''Read local tags in repo. Update alltags and tagtypes.'''
    try:
        data = repo.vfs.read("localtags")
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return

    # localtags is in the local encoding; re-encode to UTF-8 on
    # input for consistency with the rest of this module.
    filetags = _readtags(
        ui, repo, data.splitlines(), "localtags",
        recode=encoding.fromlocal)

    # remove tags pointing to invalid nodes
    cl = repo.changelog
    for t in filetags.keys():
        try:
            cl.rev(filetags[t][0])
        except (LookupError, ValueError):
            del filetags[t]

    _updatetags(filetags, "local", alltags, tagtypes)

def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
    '''Read tag definitions from a file (or any source of lines).

    This function returns two sortdicts with similar information:

    - the first dict, bintaghist, contains the tag information as expected by
      the _readtags function, i.e. a mapping from tag name to (node, hist):
        - node is the node id from the last line read for that name,
        - hist is the list of node ids previously associated with it (in file
          order). All node ids are binary, not hex.

    - the second dict, hextaglines, is a mapping from tag name to a list of
      [hexnode, line number] pairs, ordered from the oldest to the newest node.

    When calcnodelines is False the hextaglines dict is not calculated (an
    empty dict is returned). This is done to improve this function's
    performance in cases where the line numbers are not needed.
    '''

    bintaghist = util.sortdict()
    hextaglines = util.sortdict()
    count = 0

    def dbg(msg):
        ui.debug("%s, line %s: %s\n" % (fn, count, msg))

    for nline, line in enumerate(lines):
        count += 1
        if not line:
            continue
        try:
            (nodehex, name) = line.split(" ", 1)
        except ValueError:
            dbg("cannot parse entry")
            continue
        name = name.strip()
        if recode:
            name = recode(name)
        try:
            nodebin = bin(nodehex)
        except TypeError:
            dbg("node '%s' is not well formed" % nodehex)
            continue

        # update filetags
        if calcnodelines:
            # map tag name to a list of line numbers
            if name not in hextaglines:
                hextaglines[name] = []
            hextaglines[name].append([nodehex, nline])
            continue
        # map tag name to (node, hist)
        if name not in bintaghist:
            bintaghist[name] = []
        bintaghist[name].append(nodebin)
    return bintaghist, hextaglines

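# Illustrative sketch (editor's addition, not in the original source): two
# .hgtags lines naming the same tag,
#
#   <hexnode1> v1.0
#   <hexnode2> v1.0
#
# yield bintaghist['v1.0'] == [bin(hexnode1), bin(hexnode2)]; _readtags
# below then reshapes that into (node=bin(hexnode2), hist=[bin(hexnode1)]).
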
def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
    '''Read tag definitions from a file (or any source of lines).

    Returns a mapping from tag name to (node, hist).

    "node" is the node id from the last line read for that name. "hist"
    is the list of node ids previously associated with it (in file order).
    All node ids are binary, not hex.
    '''
    filetags, nodelines = _readtaghist(ui, repo, lines, fn, recode=recode,
                                       calcnodelines=calcnodelines)
    # util.sortdict().__setitem__ is much slower at replacing than inserting
    # new entries. The difference can matter if there are thousands of tags.
    # Create a new sortdict to avoid the performance penalty.
    newtags = util.sortdict()
    for tag, taghist in filetags.items():
        newtags[tag] = (taghist[-1], taghist[:-1])
    return newtags

def _updatetags(filetags, tagtype, alltags, tagtypes):
    '''Incorporate the tag info read from one file into the two
    dictionaries, alltags and tagtypes, that contain all tag
    info (global across all heads plus local).'''

    for name, nodehist in filetags.iteritems():
        if name not in alltags:
            alltags[name] = nodehist
            tagtypes[name] = tagtype
            continue

        # we prefer alltags[name] if:
        #  it supersedes us OR
        #  mutual supersedes and it has a higher rank
        # otherwise we win because we're tip-most
        anode, ahist = nodehist
        bnode, bhist = alltags[name]
        if (bnode != anode and anode in bhist and
            (bnode not in ahist or len(bhist) > len(ahist))):
            anode = bnode
        else:
            tagtypes[name] = tagtype
        ahist.extend([n for n in bhist if n not in ahist])
        alltags[name] = anode, ahist

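# Illustrative sketch (editor's addition, not in the original source):
# suppose head A yields 't' -> (node=X, hist=[W]) and a later head B yields
# 't' -> (node=W, hist=[]). X supersedes W (W appears in X's history), so
# the merge keeps (X, [W]): the existing winner is preferred and only the
# histories are unioned.
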
def _filename(repo):
    """name of a tagcache file for a given repo or repoview"""
    filename = 'cache/tags2'
    if repo.filtername:
        filename = '%s-%s' % (filename, repo.filtername)
    return filename

def _readtagcache(ui, repo):
    '''Read the tag cache.

    Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).

    If the cache is completely up-to-date, "cachetags" is a dict of the
    form returned by _readtags() and "heads", "fnodes", and "validinfo" are
    None and "shouldwrite" is False.

    If the cache is not up to date, "cachetags" is None. "heads" is a list
    of all heads currently in the repository, ordered from tip to oldest.
    "validinfo" is a tuple describing cache validation info. This is used
    when writing the tags cache. "fnodes" is a mapping from head to .hgtags
    filenode. "shouldwrite" is True.

    If the cache is not up to date, the caller is responsible for reading tag
    info from each returned head. (See findglobaltags().)
    '''
    try:
        cachefile = repo.vfs(_filename(repo), 'r')
        # force reading the file for static-http
        cachelines = iter(cachefile)
    except IOError:
        cachefile = None

    cacherev = None
    cachenode = None
    cachehash = None
    if cachefile:
        try:
            validline = next(cachelines)
            validline = validline.split()
            cacherev = int(validline[0])
            cachenode = bin(validline[1])
            if len(validline) > 2:
                cachehash = bin(validline[2])
        except Exception:
            # corruption of the cache, just recompute it.
            pass

    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1

    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if (cacherev == tiprev
            and cachenode == tipnode
            and cachehash == scmutil.filteredhash(repo, tiprev)):
        tags = _readtags(ui, repo, cachelines, cachefile.name)
        cachefile.close()
        return (None, None, None, tags, False)
    if cachefile:
        cachefile.close() # ignore rest of file

    valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))

    repoheads = repo.heads()
    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [nullid]:
        return ([], {}, valid, {}, False)

    # Case 3 (uncommon): cache file missing or empty.

    # Case 4 (uncommon): tip rev decreased. This should only happen
    # when we're called from localrepository.destroyed(). Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.

    # Case 5 (common): tip has changed, so we've added/replaced heads.

    # As it happens, the code to handle cases 3, 4, 5 is the same.

    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    if not len(repo.file('.hgtags')):
        # No tags have ever been committed, so we can avoid a
        # potentially expensive search.
        return ([], {}, valid, None, True)

    # Now we have to look up the .hgtags filenode for every new head.
    # This is the most expensive part of finding tags, so performance
    # depends primarily on the size of newheads. Worst case: no cache
    # file, so newheads == repoheads.
    cachefnode = _getfnodes(ui, repo, repoheads)

    # Caller has to iterate over all heads, but can use the filenodes in
    # cachefnode to get to each .hgtags revision quickly.
    return (repoheads, cachefnode, valid, None, True)

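# Illustrative sketch (editor's addition, not in the original source): the
# two shapes _readtagcache can hand back:
#
#   (None, None, None, cachetags, False)        # cache fresh: use cachetags
#   (repoheads, cachefnode, valid, None, True)  # stale: caller recomputes
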
def _getfnodes(ui, repo, nodes):
    """return .hgtags fnodes for a list of changeset nodes

    Return value is a {node: fnode} mapping. There will be no entry for nodes
    without a '.hgtags' file.
    """
    starttime = util.timer()
    fnodescache = hgtagsfnodescache(repo.unfiltered())
    cachefnode = {}
    for head in reversed(nodes):
        fnode = fnodescache.getfnode(head)
        if fnode != nullid:
            cachefnode[head] = fnode

    fnodescache.write()

    duration = util.timer() - starttime
    ui.log('tagscache',
           '%d/%d cache hits/lookups in %0.4f '
           'seconds\n',
           fnodescache.hitcount, fnodescache.lookupcount, duration)
    return cachefnode

def _writetagcache(ui, repo, valid, cachetags):
    filename = _filename(repo)
    try:
        cachefile = repo.vfs(filename, 'w', atomictemp=True)
    except (OSError, IOError):
        return

    ui.log('tagscache', 'writing .hg/%s with %d tags\n',
           filename, len(cachetags))

    if valid[2]:
        cachefile.write('%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2])))
    else:
        cachefile.write('%d %s\n' % (valid[0], hex(valid[1])))

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module. If we converted
    # them to local encoding on input, we would lose info writing them to
    # the cache.
    for (name, (node, hist)) in sorted(cachetags.iteritems()):
        for n in hist:
            cachefile.write("%s %s\n" % (hex(n), name))
        cachefile.write("%s %s\n" % (hex(node), name))

    try:
        cachefile.close()
    except (OSError, IOError):
        pass

def tag(repo, names, node, message, local, user, date, editor=False):
    '''tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    if not local:
        m = matchmod.exact(repo.root, '', ['.hgtags'])
        if any(repo.status(match=m, unknown=True, ignored=True)):
            raise error.Abort(_('working copy of .hgtags is changed'),
                              hint=_('please commit .hgtags manually'))

    repo.tags() # instantiate the cache
    _tag(repo.unfiltered(), names, node, message, local, user, date,
         editor=editor)

def _tag(repo, names, node, message, local, user, date, extra=None,
         editor=False):
    if isinstance(names, str):
        names = (names,)

    branches = repo.branchmap()
    for name in names:
        repo.hook('pretag', throw=True, node=hex(node), tag=name,
                  local=local)
        if name in branches:
            repo.ui.warn(_("warning: tag %s conflicts with existing"
                           " branch name\n") % name)

    def writetags(fp, names, munge, prevtags):
        fp.seek(0, 2)
        if prevtags and prevtags[-1] != '\n':
            fp.write('\n')
        for name in names:
            if munge:
                m = munge(name)
            else:
                m = name

            if (repo._tagscache.tagtypes and
                name in repo._tagscache.tagtypes):
                old = repo.tags().get(name, nullid)
                fp.write('%s %s\n' % (hex(old), m))
            fp.write('%s %s\n' % (hex(node), m))
        fp.close()

    prevtags = ''
    if local:
        try:
            fp = repo.vfs('localtags', 'r+')
        except IOError:
            fp = repo.vfs('localtags', 'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            repo.hook('tag', node=hex(node), tag=name, local=local)
        return

    try:
        fp = repo.wvfs('.hgtags', 'rb+')
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        fp = repo.wvfs('.hgtags', 'ab')
    else:
        prevtags = fp.read()

    # committed tags are stored in UTF-8
    writetags(fp, names, encoding.fromlocal, prevtags)

    fp.close()

    repo.invalidatecaches()

    if '.hgtags' not in repo.dirstate:
        repo[None].add(['.hgtags'])

    m = matchmod.exact(repo.root, '', ['.hgtags'])
    tagnode = repo.commit(message, user, date, extra=extra, match=m,
                          editor=editor)

    for name in names:
        repo.hook('tag', node=hex(node), tag=name, local=local)

    return tagnode

_fnodescachefile = 'cache/hgtagsfnodes1'
_fnodesrecsize = 4 + 20 # changeset fragment + filenode
_fnodesmissingrec = '\xff' * 24

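# Illustrative note (editor's addition, not in the original source): each
# record is _fnodesrecsize == 24 bytes, so the record for revision r lives
# at byte offset r * 24, laid out as node[0:4] + fnode; 24 0xff bytes
# (_fnodesmissingrec) marks an entry that has not been computed yet.
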
class hgtagsfnodescache(object):
    """Persistent cache mapping revisions to .hgtags filenodes.

    The cache is an array of records. Each item in the array corresponds to
    a changelog revision. Values in the array contain the first 4 bytes of
    the node hash and the 20 bytes .hgtags filenode for that revision.

    The first 4 bytes are present as a form of verification. Repository
    stripping and rewriting may change the node at a numeric revision in the
    changelog. The changeset fragment serves as a verifier to detect
    rewriting. This logic is shared with the rev branch cache (see
    branchmap.py).

    The instance holds in memory the full cache content but entries are
    only parsed on read.

    Instances behave like lists. ``c[i]`` works where i is a rev or
    changeset node. Missing indexes are populated automatically on access.
    """
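    # Illustrative sketch (editor's addition, not in the original source):
    # typical use, mirroring _getfnodes above:
    #
    #   cache = hgtagsfnodescache(repo.unfiltered())
    #   fnode = cache.getfnode(ctx.node())  # nullid if no .hgtags there
    #   cache.write()                       # persist newly computed entries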
    def __init__(self, repo):
        assert repo.filtername is None

        self._repo = repo

        # Only for reporting purposes.
        self.lookupcount = 0
        self.hitcount = 0

        try:
            data = repo.vfs.read(_fnodescachefile)
        except (OSError, IOError):
            data = ""
        self._raw = bytearray(data)

        # The end state of self._raw is an array that is of the exact length
        # required to hold a record for every revision in the repository.
        # We truncate or extend the array as necessary. self._dirtyoffset is
        # defined to be the start offset at which we need to write the output
        # file. This offset is also adjusted when new entries are calculated
        # for array members.
        cllen = len(repo.changelog)
        wantedlen = cllen * _fnodesrecsize
        rawlen = len(self._raw)

        self._dirtyoffset = None

        if rawlen < wantedlen:
            self._dirtyoffset = rawlen
            self._raw.extend('\xff' * (wantedlen - rawlen))
        elif rawlen > wantedlen:
            # There's no easy way to truncate array instances. This seems
            # slightly less evil than copying a potentially large array slice.
            for i in range(rawlen - wantedlen):
                self._raw.pop()
            self._dirtyoffset = len(self._raw)

572 def getfnode(self, node, computemissing=True):
568 def getfnode(self, node, computemissing=True):
573 """Obtain the filenode of the .hgtags file at a specified revision.
569 """Obtain the filenode of the .hgtags file at a specified revision.
574
570
575 If the value is in the cache, the entry will be validated and returned.
571 If the value is in the cache, the entry will be validated and returned.
576 Otherwise, the filenode will be computed and returned unless
572 Otherwise, the filenode will be computed and returned unless
577 "computemissing" is False, in which case None will be returned without
573 "computemissing" is False, in which case None will be returned without
578 any potentially expensive computation being performed.
574 any potentially expensive computation being performed.
579
575
580 If an .hgtags does not exist at the specified revision, nullid is
576 If an .hgtags does not exist at the specified revision, nullid is
581 returned.
577 returned.
582 """
578 """
583 ctx = self._repo[node]
579 ctx = self._repo[node]
584 rev = ctx.rev()
580 rev = ctx.rev()
585
581
586 self.lookupcount += 1
582 self.lookupcount += 1
587
583
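        # Each record in self._raw is _fnodesrecsize bytes: a 4-byte
        # changeset node prefix (used for validation) followed by the
        # 20-byte .hgtags filenode, so a revision's record starts at
        # rev * _fnodesrecsize.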
        offset = rev * _fnodesrecsize
        record = '%s' % self._raw[offset:offset + _fnodesrecsize]
        properprefix = node[0:4]

        # Validate and return existing entry.
        if record != _fnodesmissingrec:
            fileprefix = record[0:4]

            if fileprefix == properprefix:
                self.hitcount += 1
                return record[4:]

            # Fall through.

        # If we get here, the entry is either missing or invalid.

        if not computemissing:
            return None

        # Populate missing entry.
        try:
            fnode = ctx.filenode('.hgtags')
        except error.LookupError:
            # No .hgtags file on this revision.
            fnode = nullid

        self._writeentry(offset, properprefix, fnode)
        return fnode

    def setfnode(self, node, fnode):
        """Set the .hgtags filenode for a given changeset."""
        assert len(fnode) == 20
        ctx = self._repo[node]

        # Do a lookup first to avoid writing if nothing has changed.
        if self.getfnode(ctx.node(), computemissing=False) == fnode:
            return

        self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)

    def _writeentry(self, offset, prefix, fnode):
        # Slices on array instances only accept other array.
        entry = bytearray(prefix + fnode)
        self._raw[offset:offset + _fnodesrecsize] = entry
        # self._dirtyoffset could be None.
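        # In Python 2, min(None, offset) evaluates to None (None compares
        # less than any integer), and "None or 0" then yields 0, so a
        # write into a previously clean cache conservatively marks the
        # buffer dirty from offset 0.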
        self._dirtyoffset = min(self._dirtyoffset, offset) or 0

    def write(self):
        """Perform all necessary writes to cache file.

        This may no-op if no writes are needed or if a write lock could
        not be obtained.
        """
        if self._dirtyoffset is None:
            return

        data = self._raw[self._dirtyoffset:]
        if not data:
            return

        repo = self._repo

        try:
            lock = repo.wlock(wait=False)
        except error.LockError:
            repo.ui.log('tagscache',
                        'not writing .hg/%s because lock cannot be acquired\n' %
                        (_fnodescachefile))
            return

        try:
            f = repo.vfs.open(_fnodescachefile, 'ab')
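            # Opening in append mode positions the file pointer at EOF
            # without truncating, so f.tell() below reveals the on-disk
            # length and lets us detect a file shortened by another
            # process before seeking back to the dirty offset.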
            try:
                # if the file has been truncated
                actualoffset = f.tell()
                if actualoffset < self._dirtyoffset:
                    self._dirtyoffset = actualoffset
                    data = self._raw[self._dirtyoffset:]
                f.seek(self._dirtyoffset)
                f.truncate()
                repo.ui.log('tagscache',
                            'writing %d bytes to %s\n' % (
                            len(data), _fnodescachefile))
                f.write(data)
                self._dirtyoffset = None
            finally:
                f.close()
        except (IOError, OSError) as inst:
            repo.ui.log('tagscache',
                        "couldn't write %s: %s\n" % (
                        _fnodescachefile, inst))
        finally:
            lock.release()
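# An illustrative driver pattern for the class above (an assumption, not
# shown in this fragment): callers fill entries during tag resolution and
# then flush best-effort, since write() silently no-ops when the wlock is
# contended:
#
#   fnodescache = hgtagsfnodescache(repo.unfiltered())
#   for node in nodes:
#       fnodescache.getfnode(node)
#   fnodescache.write()  # best-effort; skipped if the lock is unavailable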