localrepo: use dirstate savebackup instead of handling dirstate file manually...
Mateusz Kwapich
r29191:ad1ce3c7 default
@@ -1,1971 +1,1967
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    context,
    dirstate,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    util,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered.
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

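# Added commentary (a usage sketch, not part of the original file): these
# cache helpers exist because a repo object can be wrapped in filtered views
# (see repoview and filtered() below), while the file-backed caches must
# live on the single unfiltered instance so every view shares them.
# Assuming a repo object ``repo``:
#
#     repo.filtered('visible') is repo.unfiltered()    # -> False
#     vars(repo.unfiltered())           # where cached attributes end up
#     hasunfilteredcache(repo, '_tagscache')           # probes that dict
#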
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

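# Added commentary (illustrative sketch; the calling pattern is assumed,
# not shown in this file): local paths are typically wrapped in a localpeer
# via localrepository.peer() further down, letting callers treat local and
# remote repositories uniformly:
#
#     p = repo.peer()
#     node = p.lookup('tip')    # delegates to self._repo.lookup
#     heads = p.heads()
#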
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # A list of (ui, featureset) functions.
    # Only functions defined in modules of enabled extensions are invoked.
    featuresetupfuncs = set()

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

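    # Added commentary (a sketch of assumed extension code, not from this
    # file): an extension can register a featuresetup function so that the
    # featuresetupfuncs loop in __init__ above lets its repositories carry
    # an extra requirement:
    #
    #     def featuresetup(ui, supported):
    #         supported.add('myfeature')    # 'myfeature' is hypothetical
    #
    #     def uisetup(ui):
    #         localrepo.localrepository.featuresetupfuncs.add(featuresetup)
    #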
    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

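    # Added commentary (a sketch of user configuration; the values are
    # illustrative): the experimental knobs read by _applyopenerreqs map to
    # plain hgrc settings:
    #
    #     [format]
    #     maxchainlen = 1000
    #     chunkcachesize = 65536
    #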
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        # $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

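    # Added commentary (usage sketch): filtered() is how the repoview names
    # used elsewhere in this file are materialized, e.g. 'served' in
    # localpeer.__init__ and 'visible' in cancopy below:
    #
    #     visible = repo.filtered('visible')
    #     assert visible.unfiltered() is repo.unfiltered()
    #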
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

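    # Added commentary (not original): because dirstate is declared with
    # @repofilecache('dirstate') above, the property is built lazily on
    # first access and rebuilt when .hg/dirstate changes on disk:
    #
    #     p1 = repo.dirstate.p1()   # first access instantiates the object
    #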
    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Return a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

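    # Added commentary (usage sketch, assuming a populated repo): the
    # container protocol and revset helpers above give several equivalent
    # routes to changectx objects:
    #
    #     ctx = repo['tip']     # symbolic name
    #     ctx = repo[0]         # revision number
    #     wctx = repo[None]     # working directory context
    #     for ctx in repo.set('branch(%s)', 'default'):
    #         pass              # one changectx per matching revision
    #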
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

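    # Added commentary (an hgrc sketch; the hook module and function names
    # are hypothetical): hooks fired through this helper, like the 'pretag'
    # hook in _tag below, are user-configurable, and a failing pretag hook
    # aborts the tagging operation:
    #
    #     [hooks]
    #     pretag = python:myhooks.checktag
    #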
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

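    # Added commentary (usage sketch): the tag accessors above are all
    # served from the single _tagscache property, so repeated queries stay
    # cheap:
    #
    #     repo.tags()['tip']     # node of the tip changeset
    #     repo.tagtype('1.0')    # 'global', 'local', or None
    #     repo.nodetags(node)    # sorted tag names for a given node
    #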
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

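    # Added commentary (a user configuration sketch): publishing() reads an
    # ordinary hgrc setting; a non-publishing repository leaves exchanged
    # changesets in the draft phase:
    #
    #     [phases]
    #     publish = False
    #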
    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

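    # Added commentary (sketch): setparents shows the expected calling
    # convention for dirstate parent moves; external callers should use the
    # same bracketing:
    #
    #     repo.dirstate.beginparentchange()
    #     repo.dirstate.setparents(newnode)    # 'newnode' is illustrative
    #     repo.dirstate.endparentchange()
    #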
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

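    # A sketch of the hgrc sections that _loadfilter() reads (adapted from
    # the hgrc documentation; treat the exact commands as illustrative):
    # each entry maps a file pattern to a filter command, and '!' disables
    # a pattern.
    #
    #   [encode]
    #   # uncompress gzip files on checkin to improve delta compression
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   # recompress gzip files when writing them to the working dir
    #   *.gz = pipe: gzip
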
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
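        # ``flags`` follows the manifest convention: 'l' marks a symlink,
        # 'x' an executable file, and '' a regular file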
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
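            # _transref is a weakref.ref, so this yields None once the
            # transaction object has been garbage collected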
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                raise RuntimeError('programming error: transaction requires '
                                   'locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
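        # idbase mixes randomness with the clock; the derived txnid is
        # handed to every transaction hook below via tr.hookargs['txnid']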
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
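        # _writejournal snapshots dirstate, branch, desc, bookmarks and
        # phaseroots as journal.* files; releasefn below restores the
        # dirstate from that backup if the transaction fails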
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

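    # dirstate.savebackup() (rather than a manual copy of the dirstate
    # file) writes journal.dirstate, so pending in-memory dirstate changes
    # are captured too. On a successful close, aftertrans(renames) in
    # transaction() above renames every journal.* file produced here to
    # its undo.* counterpart (see undofiles()).
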
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

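    # recover() (the backend of 'hg recover') undoes a transaction that was
    # interrupted midway, using the journal.* files; rollback() below
    # instead undoes the last *completed* transaction, using the undo.*
    # files left behind by aftertrans().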
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

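    # _lock first attempts a non-blocking acquire (timeout 0); only when
    # that raises LockHeld and wait is True does it warn the user and retry
    # with the ui.timeout setting (600 seconds by default).
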
    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

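    # A usage sketch ('repo' and '_notify' are illustrative names): schedule
    # work for after the final unlock, or run it at once if nothing is held.
    #
    #   def _notify():
    #       repo.ui.debug('all locks released\n')
    #   repo._afterlock(_notify)
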
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
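            # a pending parent change means setparents() was started but
            # never finished; the in-memory dirstate is then suspect, so
            # drop it rather than writing a half-updated state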
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

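    # The lock objects are context managers (recover() above already uses
    # 'with self.lock():'), so the documented ordering looks like this
    # sketch:
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           pass  # mutate the store and the working copy here
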
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4    as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

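    # _filecommit returns the filelog node to record in the manifest: a
    # reused node, a freshly added filelog revision, or fparent1 when only
    # the 'x'/'l' flags changed (the filelog itself is left untouched then).
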
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                            '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                        '.hgsubstate' not in (status.modified + status.added +
                                              status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

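    # Rough commit() flow: take wlock+lock, compute status, reconcile
    # subrepo state, build a workingcommitctx, fire the precommit hook, then
    # delegate to commitctx() inside a 'commit' transaction; the 'commit'
    # hook itself is deferred through _afterlock() until all locks drop.
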
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the result
                # will be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

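    # commitctx() orders its writes so the changelog entry lands last:
    # filelogs first, then the manifest, then the (delayed) changelog, which
    # lets the pretxncommit hook observe a consistent repository before
    # tr.close() makes the new revision permanent.
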
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

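Taken together, destroying() and destroyed() are meant to bracket the
destructive step; a minimal sketch of a hypothetical strip-like caller (the
helper name is made up):

    lock = repo.lock()
    try:
        repo.destroying()        # flush pending in-memory state first
        removerevisions(repo)    # hypothetical history-destroying step
        repo.destroyed()         # then rebuild caches minus the dead nodes
    finally:
        lock.release()
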
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

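Both convenience methods simply delegate to a context object; a hypothetical
interactive sketch, assuming repo is an open localrepository:

    m = matchmod.always(repo.root, '')   # match everything
    for f in repo.walk(m):               # files in the working directory
        repo.ui.write('%s\n' % f)

    st = repo.status(ignored=True, unknown=True)
    repo.ui.write('%d modified, %d unknown\n'
                  % (len(st.modified), len(st.unknown)))
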
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

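For example, a hypothetical query for the heads of the default branch:

    heads = repo.branchheads('default')
    # newest-first list of nodes; more than one entry means the branch
    # has multiple topological heads
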
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

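For each (top, bottom) pair this walks first parents from top towards bottom
and keeps the nodes whose distance from top is a power of two (1, 2, 4, 8,
...); the legacy 'between' wire-protocol discovery relies on this sampling. A
worked example on a hypothetical linear history of revisions 0..10, writing
node(i) for the node of revision i:

    repo.between([(node(10), node(0))])
    # -> [[node(9), node(8), node(6), node(2)]]
    #    distances from node(10): 1, 2, 4, 8
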
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (exposing repo, remote, and outgoing) before changesets are pushed.
        """
        return util.hooks()

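A hypothetical extension could use this to veto pushes; the callbacks receive
the pushop, so the outgoing changesets are reachable as
pushop.outgoing.missing (the extension names below are made up):

    from mercurial import error

    def checkoutgoing(pushop):
        if len(pushop.outgoing.missing) > 100:
            raise error.Abort('refusing to push more than 100 changesets')

    def reposetup(ui, repo):
        repo.prepushoutgoinghooks.add('bigpushguard', checkoutgoing)
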
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

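Bookmarks and phases are the stock pushkey namespaces; a hypothetical local
session might look like this (somenode is a placeholder for a changeset node
known to the repository):

    repo.listkeys('namespaces')     # available namespaces, e.g. bookmarks
    repo.listkeys('bookmarks')      # {bookmark name: hex node, ...}
    # create a bookmark through the pushkey layer (old value '' means
    # the key is expected to be absent):
    repo.pushkey('bookmarks', 'stable', '', hex(somenode))
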
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
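Since the docstring invites wrapping, a hypothetical extension could extend
the set like this (the requirement name and config knob are made up):

    from mercurial import extensions, localrepo

    def wrapreqs(orig, repo):
        reqs = orig(repo)
        if repo.ui.configbool('myext', 'enabled', False):
            reqs.add('exp-myext-storage')
        return reqs

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements', wrapreqs)
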
@@ -1,157 +1,158
#require unix-permissions

test that new files created in .hg inherit the permissions from .hg/store

  $ mkdir dir

just in case somebody has a strange $TMPDIR

  $ chmod g-s dir
  $ cd dir

  $ cat >printmodes.py <<EOF
  > import os, sys
  >
  > allnames = []
  > isdir = {}
  > for root, dirs, files in os.walk(sys.argv[1]):
  >     for d in dirs:
  >         name = os.path.join(root, d)
  >         isdir[name] = 1
  >         allnames.append(name)
  >     for f in files:
  >         name = os.path.join(root, f)
  >         allnames.append(name)
  > allnames.sort()
  > for name in allnames:
  >     suffix = name in isdir and '/' or ''
  >     print '%05o %s%s' % (os.lstat(name).st_mode & 07777, name, suffix)
  > EOF

  $ cat >mode.py <<EOF
  > import sys
  > import os
  > print '%05o' % os.lstat(sys.argv[1]).st_mode
  > EOF

  $ umask 077

  $ hg init repo
  $ cd repo

  $ chmod 0770 .hg/store

before commit
store can be written by the group, other files cannot
store is setgid

  $ python ../printmodes.py .
  00700 ./.hg/
  00600 ./.hg/00changelog.i
  00600 ./.hg/requires
  00770 ./.hg/store/

  $ mkdir dir
  $ touch foo dir/bar
  $ hg ci -qAm 'add files'

after commit
working dir files can only be written by the owner
files created in .hg can be written by the group
(in particular, store/**, dirstate, branch cache file, undo files)
new directories are setgid

  $ python ../printmodes.py .
  00700 ./.hg/
  00600 ./.hg/00changelog.i
  00770 ./.hg/cache/
  00660 ./.hg/cache/branch2-served
  00660 ./.hg/cache/rbc-names-v1
  00660 ./.hg/cache/rbc-revs-v1
  00660 ./.hg/dirstate
  00660 ./.hg/last-message.txt
  00600 ./.hg/requires
  00770 ./.hg/store/
  00660 ./.hg/store/00changelog.i
  00660 ./.hg/store/00manifest.i
  00770 ./.hg/store/data/
  00770 ./.hg/store/data/dir/
  00660 ./.hg/store/data/dir/bar.i
  00660 ./.hg/store/data/foo.i
  00660 ./.hg/store/fncache
  00660 ./.hg/store/phaseroots
  00660 ./.hg/store/undo
  00660 ./.hg/store/undo.backupfiles
  00660 ./.hg/store/undo.phaseroots
  00660 ./.hg/undo.backup.dirstate
  00660 ./.hg/undo.bookmarks
  00660 ./.hg/undo.branch
  00660 ./.hg/undo.desc
  00660 ./.hg/undo.dirstate
  00700 ./dir/
  00600 ./dir/bar
  00600 ./foo

  $ umask 007
  $ hg init ../push

before push
group can write everything

  $ python ../printmodes.py ../push
  00770 ../push/.hg/
  00660 ../push/.hg/00changelog.i
  00660 ../push/.hg/requires
  00770 ../push/.hg/store/

  $ umask 077
  $ hg -q push ../push

after push
group can still write everything

  $ python ../printmodes.py ../push
  00770 ../push/.hg/
  00660 ../push/.hg/00changelog.i
  00770 ../push/.hg/cache/
  00660 ../push/.hg/cache/branch2-base
  00660 ../push/.hg/cache/rbc-names-v1
  00660 ../push/.hg/cache/rbc-revs-v1
+ 00660 ../push/.hg/dirstate
  00660 ../push/.hg/requires
  00770 ../push/.hg/store/
  00660 ../push/.hg/store/00changelog.i
  00660 ../push/.hg/store/00manifest.i
  00770 ../push/.hg/store/data/
  00770 ../push/.hg/store/data/dir/
  00660 ../push/.hg/store/data/dir/bar.i
  00660 ../push/.hg/store/data/foo.i
  00660 ../push/.hg/store/fncache
  00660 ../push/.hg/store/undo
  00660 ../push/.hg/store/undo.backupfiles
  00660 ../push/.hg/store/undo.phaseroots
  00660 ../push/.hg/undo.bookmarks
  00660 ../push/.hg/undo.branch
  00660 ../push/.hg/undo.desc
  00660 ../push/.hg/undo.dirstate


Test that we don't lose the setgid bit when we call chmod.
Not all systems support setgid directories (e.g. HFS+), so
just check that directories have the same mode.

  $ cd ..
  $ hg init setgid
  $ cd setgid
  $ chmod g+rwx .hg/store
  $ chmod g+s .hg/store 2> /dev/null || true
  $ mkdir dir
  $ touch dir/file
  $ hg ci -qAm 'add dir/file'
  $ storemode=`python ../mode.py .hg/store`
  $ dirmode=`python ../mode.py .hg/store/data/dir`
  $ if [ "$storemode" != "$dirmode" ]; then
  >     echo "$storemode != $dirmode"
  > fi
  $ cd ..

  $ cd .. # g-s dir