localrepo: jettison parents() method per deprecation policy (API)
Augie Fackler
r29075:3f0177d2 default
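
This changeset removes localrepository.parents(), whose own deprecation warning (old lines 884-889 below) already named the replacement: "repo.parents() is deprecated, use repo[%r].parents()". A minimal before/after sketch of the call-site migration, assuming repo is any localrepository instance:

    # before (deprecated since 3.7, removed by this changeset):
    #   ps = repo.parents()          # changeid defaulted to None
    # after, per the deprecwarn text of the removed method:
    ps = repo[None].parents()        # parents of the working directory
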
@@ -1,1983 +1,1977 @@
1  # localrepo.py - read/write repository class for mercurial
2  #
3  # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4  #
5  # This software may be used and distributed according to the terms of the
6  # GNU General Public License version 2 or any later version.
7
8  from __future__ import absolute_import
9
10  import errno
11  import inspect
12  import os
13  import random
14  import time
15  import weakref
16
17  from .i18n import _
18  from .node import (
19      hex,
20      nullid,
21      short,
22      wdirrev,
23  )
24  from . import (
25      bookmarks,
26      branchmap,
27      bundle2,
28      changegroup,
29      changelog,
30      cmdutil,
31      context,
32      dirstate,
33      encoding,
34      error,
35      exchange,
36      extensions,
37      filelog,
38      hook,
39      lock as lockmod,
40      manifest,
41      match as matchmod,
42      merge as mergemod,
43      namespaces,
44      obsolete,
45      pathutil,
46      peer,
47      phases,
48      pushkey,
49      repoview,
50      revset,
51      scmutil,
52      store,
53      subrepo,
54      tags as tagsmod,
55      transaction,
56      util,
57  )
58
59  release = lockmod.release
60  propertycache = util.propertycache
61  urlerr = util.urlerr
62  urlreq = util.urlreq
63  filecache = scmutil.filecache
64
65  class repofilecache(filecache):
66      """All filecache usage on repo are done for logic that should be unfiltered
67      """
68
69      def __get__(self, repo, type=None):
70          return super(repofilecache, self).__get__(repo.unfiltered(), type)
71      def __set__(self, repo, value):
72          return super(repofilecache, self).__set__(repo.unfiltered(), value)
73      def __delete__(self, repo):
74          return super(repofilecache, self).__delete__(repo.unfiltered())
75
76  class storecache(repofilecache):
77      """filecache for files in the store"""
78      def join(self, obj, fname):
79          return obj.sjoin(fname)
80
81  class unfilteredpropertycache(propertycache):
82      """propertycache that apply to unfiltered repo only"""
83
84      def __get__(self, repo, type=None):
85          unfi = repo.unfiltered()
86          if unfi is repo:
87              return super(unfilteredpropertycache, self).__get__(unfi)
88          return getattr(unfi, self.name)
89
90  class filteredpropertycache(propertycache):
91      """propertycache that must take filtering in account"""
92
93      def cachevalue(self, obj, value):
94          object.__setattr__(obj, self.name, value)
95
96
97  def hasunfilteredcache(repo, name):
98      """check if a repo has an unfilteredpropertycache value for <name>"""
99      return name in vars(repo.unfiltered())
100
101  def unfilteredmethod(orig):
102      """decorate method that always need to be run on unfiltered version"""
103      def wrapper(repo, *args, **kwargs):
104          return orig(repo.unfiltered(), *args, **kwargs)
105      return wrapper
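
All four cache helpers above route attribute access through repo.unfiltered(), so a value computed once is shared by every filtered view of the same repository. A toy sketch of the underlying propertycache idea, assuming only standard descriptor behavior (the real class lives in util, and filecache additionally tracks file stat data):

    class cachedprop(object):
        # compute once, then store the value in the instance __dict__,
        # which shadows this non-data descriptor on later lookups
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, type=None):
            if obj is None:
                return self
            value = self.func(obj)
            obj.__dict__[self.name] = value
            return value
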
106
107  moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
108                    'unbundle'))
109  legacycaps = moderncaps.union(set(['changegroupsubset']))
110
111  class localpeer(peer.peerrepository):
112      '''peer for a local repo; reflects only the most recent API'''
113
114      def __init__(self, repo, caps=moderncaps):
115          peer.peerrepository.__init__(self)
116          self._repo = repo.filtered('served')
117          self.ui = repo.ui
118          self._caps = repo._restrictcapabilities(caps)
119          self.requirements = repo.requirements
120          self.supportedformats = repo.supportedformats
121
122      def close(self):
123          self._repo.close()
124
125      def _capabilities(self):
126          return self._caps
127
128      def local(self):
129          return self._repo
130
131      def canpush(self):
132          return True
133
134      def url(self):
135          return self._repo.url()
136
137      def lookup(self, key):
138          return self._repo.lookup(key)
139
140      def branchmap(self):
141          return self._repo.branchmap()
142
143      def heads(self):
144          return self._repo.heads()
145
146      def known(self, nodes):
147          return self._repo.known(nodes)
148
149      def getbundle(self, source, heads=None, common=None, bundlecaps=None,
150                    **kwargs):
151          cg = exchange.getbundle(self._repo, source, heads=heads,
152                                  common=common, bundlecaps=bundlecaps, **kwargs)
153          if bundlecaps is not None and 'HG20' in bundlecaps:
154              # When requesting a bundle2, getbundle returns a stream to make the
155              # wire level function happier. We need to build a proper object
156              # from it in local peer.
157              cg = bundle2.getunbundler(self.ui, cg)
158          return cg
159
160      # TODO We might want to move the next two calls into legacypeer and add
161      # unbundle instead.
162
163      def unbundle(self, cg, heads, url):
164          """apply a bundle on a repo
165
166          This function handles the repo locking itself."""
167          try:
168              try:
169                  cg = exchange.readbundle(self.ui, cg, None)
170                  ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
171                  if util.safehasattr(ret, 'getchunks'):
172                      # This is a bundle20 object, turn it into an unbundler.
173                      # This little dance should be dropped eventually when the
174                      # API is finally improved.
175                      stream = util.chunkbuffer(ret.getchunks())
176                      ret = bundle2.getunbundler(self.ui, stream)
177                  return ret
178              except Exception as exc:
179                  # If the exception contains output salvaged from a bundle2
180                  # reply, we need to make sure it is printed before continuing
181                  # to fail. So we build a bundle2 with such output and consume
182                  # it directly.
183                  #
184                  # This is not very elegant but allows a "simple" solution for
185                  # issue4594
186                  output = getattr(exc, '_bundle2salvagedoutput', ())
187                  if output:
188                      bundler = bundle2.bundle20(self._repo.ui)
189                      for out in output:
190                          bundler.addpart(out)
191                      stream = util.chunkbuffer(bundler.getchunks())
192                      b = bundle2.getunbundler(self.ui, stream)
193                      bundle2.processbundle(self._repo, b)
194                  raise
195          except error.PushRaced as exc:
196              raise error.ResponseError(_('push failed:'), str(exc))
197
198      def lock(self):
199          return self._repo.lock()
200
201      def addchangegroup(self, cg, source, url):
202          return cg.apply(self._repo, source, url)
203
204      def pushkey(self, namespace, key, old, new):
205          return self._repo.pushkey(namespace, key, old, new)
206
207      def listkeys(self, namespace):
208          return self._repo.listkeys(namespace)
209
210      def debugwireargs(self, one, two, three=None, four=None, five=None):
211          '''used to test argument passing over the wire'''
212          return "%s %s %s %s %s" % (one, two, three, four, five)
213
214  class locallegacypeer(localpeer):
215      '''peer extension which implements legacy methods too; used for tests with
216      restricted capabilities'''
217
218      def __init__(self, repo):
219          localpeer.__init__(self, repo, caps=legacycaps)
220
221      def branches(self, nodes):
222          return self._repo.branches(nodes)
223
224      def between(self, pairs):
225          return self._repo.between(pairs)
226
227      def changegroup(self, basenodes, source):
228          return changegroup.changegroup(self._repo, basenodes, source)
229
230      def changegroupsubset(self, bases, heads, source):
231          return changegroup.changegroupsubset(self._repo, bases, heads, source)
232
233  class localrepository(object):
234
235      supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
236                              'manifestv2'))
237      _basesupported = supportedformats | set(('store', 'fncache', 'shared',
238                                               'dotencode'))
239      openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
240      filtername = None
241
242      # a list of (ui, featureset) functions.
243      # only functions defined in module of enabled extensions are invoked
244      featuresetupfuncs = set()
245
246      def __init__(self, baseui, path=None, create=False):
247          self.requirements = set()
248          self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
249          self.wopener = self.wvfs
250          self.root = self.wvfs.base
251          self.path = self.wvfs.join(".hg")
252          self.origroot = path
253          self.auditor = pathutil.pathauditor(self.root, self._checknested)
254          self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
255                                                  realfs=False)
256          self.vfs = scmutil.vfs(self.path)
257          self.opener = self.vfs
258          self.baseui = baseui
59          self.ui = baseui.copy()
260          self.ui.copy = baseui.copy # prevent copying repo configuration
261          # A list of callback to shape the phase if no data were found.
262          # Callback are in the form: func(repo, roots) --> processed root.
263          # This list it to be filled by extension during repo setup
264          self._phasedefaults = []
265          try:
266              self.ui.readconfig(self.join("hgrc"), self.root)
267              extensions.loadall(self.ui)
268          except IOError:
269              pass
270
271          if self.featuresetupfuncs:
272              self.supported = set(self._basesupported) # use private copy
273              extmods = set(m.__name__ for n, m
274                            in extensions.extensions(self.ui))
275              for setupfunc in self.featuresetupfuncs:
276                  if setupfunc.__module__ in extmods:
277                      setupfunc(self.ui, self.supported)
278          else:
279              self.supported = self._basesupported
280
281          if not self.vfs.isdir():
282              if create:
283                  self.requirements = newreporequirements(self)
284
285                  if not self.wvfs.exists():
286                      self.wvfs.makedirs()
287                  self.vfs.makedir(notindexed=True)
288
289                  if 'store' in self.requirements:
290                      self.vfs.mkdir("store")
291
292                      # create an invalid changelog
293                      self.vfs.append(
294                          "00changelog.i",
295                          '\0\0\0\2' # represents revlogv2
296                          ' dummy changelog to prevent using the old repo layout'
297                      )
298              else:
299                  raise error.RepoError(_("repository %s not found") % path)
300          elif create:
301              raise error.RepoError(_("repository %s already exists") % path)
302          else:
303              try:
304                  self.requirements = scmutil.readrequires(
305                      self.vfs, self.supported)
306              except IOError as inst:
307                  if inst.errno != errno.ENOENT:
308                      raise
309
310          self.sharedpath = self.path
311          try:
312              vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
313                                realpath=True)
314              s = vfs.base
315              if not vfs.exists():
316                  raise error.RepoError(
317                      _('.hg/sharedpath points to nonexistent directory %s') % s)
318              self.sharedpath = s
319          except IOError as inst:
320              if inst.errno != errno.ENOENT:
321                  raise
322
323          self.store = store.store(
324              self.requirements, self.sharedpath, scmutil.vfs)
325          self.spath = self.store.path
326          self.svfs = self.store.vfs
327          self.sjoin = self.store.join
328          self.vfs.createmode = self.store.createmode
329          self._applyopenerreqs()
330          if create:
331              self._writerequirements()
332
333          self._dirstatevalidatewarned = False
334
335          self._branchcaches = {}
336          self._revbranchcache = None
337          self.filterpats = {}
338          self._datafilters = {}
339          self._transref = self._lockref = self._wlockref = None
340
341          # A cache for various files under .hg/ that tracks file changes,
342          # (used by the filecache decorator)
343          #
344          # Maps a property name to its util.filecacheentry
345          self._filecache = {}
346
347          # hold sets of revision to be filtered
348          # should be cleared when something might have changed the filter value:
349          # - new changesets,
350          # - phase change,
351          # - new obsolescence marker,
352          # - working directory parent change,
353          # - bookmark changes
354          self.filteredrevcache = {}
355
356          # generic mapping between names and nodes
357          self.names = namespaces.namespaces()
358
359      def close(self):
360          self._writecaches()
361
362      def _writecaches(self):
363          if self._revbranchcache:
364              self._revbranchcache.write()
365
366      def _restrictcapabilities(self, caps):
367          if self.ui.configbool('experimental', 'bundle2-advertise', True):
368              caps = set(caps)
369              capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
370              caps.add('bundle2=' + urlreq.quote(capsblob))
371          return caps
372
373      def _applyopenerreqs(self):
374          self.svfs.options = dict((r, 1) for r in self.requirements
375                                   if r in self.openerreqs)
376          # experimental config: format.chunkcachesize
377          chunkcachesize = self.ui.configint('format', 'chunkcachesize')
378          if chunkcachesize is not None:
379              self.svfs.options['chunkcachesize'] = chunkcachesize
380          # experimental config: format.maxchainlen
381          maxchainlen = self.ui.configint('format', 'maxchainlen')
382          if maxchainlen is not None:
383              self.svfs.options['maxchainlen'] = maxchainlen
384          # experimental config: format.manifestcachesize
385          manifestcachesize = self.ui.configint('format', 'manifestcachesize')
386          if manifestcachesize is not None:
387              self.svfs.options['manifestcachesize'] = manifestcachesize
388          # experimental config: format.aggressivemergedeltas
389          aggressivemergedeltas = self.ui.configbool('format',
390                                                     'aggressivemergedeltas', False)
391          self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
392          self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
393
394      def _writerequirements(self):
395          scmutil.writerequires(self.vfs, self.requirements)
396
397      def _checknested(self, path):
398          """Determine if path is a legal nested repository."""
399          if not path.startswith(self.root):
400              return False
401          subpath = path[len(self.root) + 1:]
402          normsubpath = util.pconvert(subpath)
403
404          # XXX: Checking against the current working copy is wrong in
405          # the sense that it can reject things like
406          #
407          #   $ hg cat -r 10 sub/x.txt
408          #
409          # if sub/ is no longer a subrepository in the working copy
410          # parent revision.
411          #
412          # However, it can of course also allow things that would have
413          # been rejected before, such as the above cat command if sub/
414          # is a subrepository now, but was a normal directory before.
415          # The old path auditor would have rejected by mistake since it
416          # panics when it sees sub/.hg/.
417          #
418          # All in all, checking against the working copy seems sensible
419          # since we want to prevent access to nested repositories on
420          # the filesystem *now*.
421          ctx = self[None]
422          parts = util.splitpath(subpath)
423          while parts:
424              prefix = '/'.join(parts)
425              if prefix in ctx.substate:
426                  if prefix == normsubpath:
427                      return True
428                  else:
429                      sub = ctx.sub(prefix)
430                      return sub.checknested(subpath[len(prefix) + 1:])
431              else:
432                  parts.pop()
433          return False
434
435      def peer(self):
436          return localpeer(self) # not cached to avoid reference cycle
437
438      def unfiltered(self):
439          """Return unfiltered version of the repository
440
441          Intended to be overwritten by filtered repo."""
442          return self
443
444      def filtered(self, name):
445          """Return a filtered version of a repository"""
446          # build a new class with the mixin and the current class
447          # (possibly subclass of the repo)
448          class proxycls(repoview.repoview, self.unfiltered().__class__):
449              pass
450          return proxycls(self, name)
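
filtered() is the counterpart of unfiltered(): it wraps the repo in a repoview proxy for a named visibility level, while caches keep living on the one unfiltered object. A hedged usage sketch, using the two filter names that actually appear in this file ('served' in localpeer.__init__ and 'visible' in cancopy):

    served = repo.filtered('served')     # view without hidden/secret changesets
    assert served.unfiltered() is repo.unfiltered()
    # so hasunfilteredcache(served, name) and hasunfilteredcache(repo, name)
    # always agree
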
451
452      @repofilecache('bookmarks', 'bookmarks.current')
453      def _bookmarks(self):
454          return bookmarks.bmstore(self)
455
456      @property
457      def _activebookmark(self):
458          return self._bookmarks.active
459
460      def bookmarkheads(self, bookmark):
461          name = bookmark.split('@', 1)[0]
462          heads = []
463          for mark, n in self._bookmarks.iteritems():
464              if mark.split('@', 1)[0] == name:
465                  heads.append(n)
466          return heads
467
468      # _phaserevs and _phasesets depend on changelog. what we need is to
469      # call _phasecache.invalidate() if '00changelog.i' was changed, but it
470      # can't be easily expressed in filecache mechanism.
471      @storecache('phaseroots', '00changelog.i')
472      def _phasecache(self):
473          return phases.phasecache(self, self._phasedefaults)
474
475      @storecache('obsstore')
476      def obsstore(self):
477          # read default format for new obsstore.
478          # developer config: format.obsstore-version
479          defaultformat = self.ui.configint('format', 'obsstore-version', None)
480          # rely on obsstore class default when possible.
481          kwargs = {}
482          if defaultformat is not None:
483              kwargs['defaultformat'] = defaultformat
484          readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
485          store = obsolete.obsstore(self.svfs, readonly=readonly,
486                                    **kwargs)
487          if store and readonly:
488              self.ui.warn(
489                  _('obsolete feature not enabled but %i markers found!\n')
490                  % len(list(store)))
491          return store
492
493      @storecache('00changelog.i')
494      def changelog(self):
495          c = changelog.changelog(self.svfs)
496          if 'HG_PENDING' in os.environ:
497              p = os.environ['HG_PENDING']
498              if p.startswith(self.root):
499                  c.readpending('00changelog.i.a')
500          return c
501
502      @storecache('00manifest.i')
503      def manifest(self):
504          return manifest.manifest(self.svfs)
505
506      def dirlog(self, dir):
507          return self.manifest.dirlog(dir)
508
509      @repofilecache('dirstate')
510      def dirstate(self):
511          return dirstate.dirstate(self.vfs, self.ui, self.root,
512                                   self._dirstatevalidate)
513
514      def _dirstatevalidate(self, node):
515          try:
516              self.changelog.rev(node)
517              return node
518          except error.LookupError:
519              if not self._dirstatevalidatewarned:
520                  self._dirstatevalidatewarned = True
521                  self.ui.warn(_("warning: ignoring unknown"
522                                 " working parent %s!\n") % short(node))
523              return nullid
524
525      def __getitem__(self, changeid):
526          if changeid is None or changeid == wdirrev:
527              return context.workingctx(self)
528          if isinstance(changeid, slice):
529              return [context.changectx(self, i)
530                      for i in xrange(*changeid.indices(len(self)))
531                      if i not in self.changelog.filteredrevs]
532          return context.changectx(self, changeid)
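
__getitem__ gives the repo a dict/sequence-like interface over contexts: None (or wdirrev) yields the working-directory context, a slice yields a list of changectx objects with filtered revisions skipped, and anything else is handed to context.changectx for resolution. Usage sketch:

    wctx = repo[None]      # workingctx
    tip = repo['tip']      # changectx resolved from a symbol
    ctxs = repo[0:5]       # [changectx, ...] minus filtered revisions
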
533
534      def __contains__(self, changeid):
535          try:
536              self[changeid]
537              return True
538          except error.RepoLookupError:
539              return False
540
541      def __nonzero__(self):
542          return True
543
544      def __len__(self):
545          return len(self.changelog)
546
547      def __iter__(self):
548          return iter(self.changelog)
549
550      def revs(self, expr, *args):
551          '''Find revisions matching a revset.
552
553          The revset is specified as a string ``expr`` that may contain
554          %-formatting to escape certain types. See ``revset.formatspec``.
555
556          Return a revset.abstractsmartset, which is a list-like interface
557          that contains integer revisions.
558          '''
559          expr = revset.formatspec(expr, *args)
560          m = revset.match(None, expr)
561          return m(self)
562
563      def set(self, expr, *args):
564          '''Find revisions matching a revset and emit changectx instances.
565
566          This is a convenience wrapper around ``revs()`` that iterates the
567          result and is a generator of changectx instances.
568          '''
569          for r in self.revs(expr, *args):
570              yield self[r]
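
Together revs() and set() are the in-process revset API: revs() returns a lazy smartset of integer revisions, set() wraps each into a changectx, and the %-codes documented in revset.formatspec safely embed values in the expression. A sketch (the particular revset expressions are only examples):

    draft_heads = repo.revs('head() and draft()')        # smartset of ints
    for ctx in repo.set('branch(%s) and not closed()', 'default'):
        pass                                             # ctx is a changectx
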
571
572      def url(self):
573          return 'file:' + self.root
574
575      def hook(self, name, throw=False, **args):
576          """Call a hook, passing this repo instance.
577
578          This a convenience method to aid invoking hooks. Extensions likely
579          won't call this unless they have registered a custom hook or are
580          replacing code that is expected to call a hook.
581          """
582          return hook.hook(self.ui, self, name, throw, **args)
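
hook() forwards to hook.hook(), which runs both in-process Python hooks and shell hooks configured under [hooks]; keyword arguments are exposed to shell hooks as HG_* environment variables, and throw=True turns a failing hook into an abort. The _tag method below is a concrete caller; a sketch of the pairing (the hook entry name and command are illustrative):

    # Python side, as in _tag below:
    repo.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

    # hgrc side; the tag= argument arrives as $HG_TAG in a shell hook:
    #   [hooks]
    #   pretag.forbid-tip = test "$HG_TAG" != "tip"
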
583
584      @unfilteredmethod
585      def _tag(self, names, node, message, local, user, date, extra=None,
586               editor=False):
587          if isinstance(names, str):
588              names = (names,)
589
590          branches = self.branchmap()
591          for name in names:
592              self.hook('pretag', throw=True, node=hex(node), tag=name,
593                        local=local)
594              if name in branches:
595                  self.ui.warn(_("warning: tag %s conflicts with existing"
596                                 " branch name\n") % name)
597
598          def writetags(fp, names, munge, prevtags):
599              fp.seek(0, 2)
600              if prevtags and prevtags[-1] != '\n':
601                  fp.write('\n')
602              for name in names:
603                  if munge:
604                      m = munge(name)
605                  else:
606                      m = name
607
608                  if (self._tagscache.tagtypes and
609                      name in self._tagscache.tagtypes):
610                      old = self.tags().get(name, nullid)
611                      fp.write('%s %s\n' % (hex(old), m))
612                  fp.write('%s %s\n' % (hex(node), m))
613              fp.close()
614
615          prevtags = ''
616          if local:
617              try:
618                  fp = self.vfs('localtags', 'r+')
619              except IOError:
620                  fp = self.vfs('localtags', 'a')
621              else:
622                  prevtags = fp.read()
623
624              # local tags are stored in the current charset
625              writetags(fp, names, None, prevtags)
626              for name in names:
627                  self.hook('tag', node=hex(node), tag=name, local=local)
628              return
629
630          try:
631              fp = self.wfile('.hgtags', 'rb+')
632          except IOError as e:
633              if e.errno != errno.ENOENT:
634                  raise
635              fp = self.wfile('.hgtags', 'ab')
636          else:
637              prevtags = fp.read()
638
639          # committed tags are stored in UTF-8
640          writetags(fp, names, encoding.fromlocal, prevtags)
641
642          fp.close()
643
644          self.invalidatecaches()
645
646          if '.hgtags' not in self.dirstate:
647              self[None].add(['.hgtags'])
648
649          m = matchmod.exact(self.root, '', ['.hgtags'])
650          tagnode = self.commit(message, user, date, extra=extra, match=m,
651                                editor=editor)
652
653          for name in names:
654              self.hook('tag', node=hex(node), tag=name, local=local)
655
656          return tagnode
657
658      def tag(self, names, node, message, local, user, date, editor=False):
659          '''tag a revision with one or more symbolic names.
660
661          names is a list of strings or, when adding a single tag, names may be a
662          string.
663
664          if local is True, the tags are stored in a per-repository file.
665          otherwise, they are stored in the .hgtags file, and a new
666          changeset is committed with the change.
667
668          keyword arguments:
669
670          local: whether to store tags in non-version-controlled file
671          (default False)
672
673          message: commit message to use if committing
674
675          user: name of user to use if committing
676
677          date: date tuple to use if committing'''
678
679          if not local:
680              m = matchmod.exact(self.root, '', ['.hgtags'])
681              if any(self.status(match=m, unknown=True, ignored=True)):
682                  raise error.Abort(_('working copy of .hgtags is changed'),
683                                    hint=_('please commit .hgtags manually'))
684
685          self.tags() # instantiate the cache
686          self._tag(names, node, message, local, user, date, editor=editor)
687
688      @filteredpropertycache
689      def _tagscache(self):
690          '''Returns a tagscache object that contains various tags related
691          caches.'''
692
693          # This simplifies its cache management by having one decorated
694          # function (this one) and the rest simply fetch things from it.
695          class tagscache(object):
696              def __init__(self):
697                  # These two define the set of tags for this repository. tags
698                  # maps tag name to node; tagtypes maps tag name to 'global' or
699                  # 'local'. (Global tags are defined by .hgtags across all
700                  # heads, and local tags are defined in .hg/localtags.)
701                  # They constitute the in-memory cache of tags.
702                  self.tags = self.tagtypes = None
703
704                  self.nodetagscache = self.tagslist = None
705
706          cache = tagscache()
707          cache.tags, cache.tagtypes = self._findtags()
708
709          return cache
710
711      def tags(self):
712          '''return a mapping of tag to node'''
713          t = {}
714          if self.changelog.filteredrevs:
715              tags, tt = self._findtags()
716          else:
717              tags = self._tagscache.tags
718          for k, v in tags.iteritems():
719              try:
720                  # ignore tags to unknown nodes
721                  self.changelog.rev(v)
722                  t[k] = v
723              except (error.LookupError, ValueError):
724                  pass
725          return t
726
727      def _findtags(self):
728          '''Do the hard work of finding tags.  Return a pair of dicts
729          (tags, tagtypes) where tags maps tag name to node, and tagtypes
730          maps tag name to a string like \'global\' or \'local\'.
731          Subclasses or extensions are free to add their own tags, but
732          should be aware that the returned dicts will be retained for the
733          duration of the localrepo object.'''
734
735          # XXX what tagtype should subclasses/extensions use?  Currently
736          # mq and bookmarks add tags, but do not set the tagtype at all.
737          # Should each extension invent its own tag type?  Should there
738          # be one tagtype for all such "virtual" tags?  Or is the status
739          # quo fine?
740
741          alltags = {}    # map tag name to (node, hist)
742          tagtypes = {}
743
744          tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
745          tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
746
747          # Build the return dicts.  Have to re-encode tag names because
748          # the tags module always uses UTF-8 (in order not to lose info
749          # writing to the cache), but the rest of Mercurial wants them in
750          # local encoding.
751          tags = {}
752          for (name, (node, hist)) in alltags.iteritems():
753              if node != nullid:
754                  tags[encoding.tolocal(name)] = node
755          tags['tip'] = self.changelog.tip()
756          tagtypes = dict([(encoding.tolocal(name), value)
757                           for (name, value) in tagtypes.iteritems()])
758          return (tags, tagtypes)
759
760      def tagtype(self, tagname):
761          '''
762          return the type of the given tag. result can be:
763
764          'local'  : a local tag
765          'global' : a global tag
766          None     : tag does not exist
767          '''
768
769          return self._tagscache.tagtypes.get(tagname)
770
771      def tagslist(self):
772          '''return a list of tags ordered by revision'''
773          if not self._tagscache.tagslist:
774              l = []
775              for t, n in self.tags().iteritems():
776                  l.append((self.changelog.rev(n), t, n))
777              self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
778
779          return self._tagscache.tagslist
780
781      def nodetags(self, node):
782          '''return the tags associated with a node'''
783          if not self._tagscache.nodetagscache:
784              nodetagscache = {}
785              for t, n in self._tagscache.tags.iteritems():
786                  nodetagscache.setdefault(n, []).append(t)
787              for tags in nodetagscache.itervalues():
788                  tags.sort()
789              self._tagscache.nodetagscache = nodetagscache
790          return self._tagscache.nodetagscache.get(node, [])
791
792      def nodebookmarks(self, node):
793          """return the list of bookmarks pointing to the specified node"""
794          marks = []
795          for bookmark, n in self._bookmarks.iteritems():
796              if n == node:
797                  marks.append(bookmark)
798          return sorted(marks)
799
800      def branchmap(self):
801          '''returns a dictionary {branch: [branchheads]} with branchheads
802          ordered by increasing revision number'''
803          branchmap.updatecache(self)
804          return self._branchcaches[self.filtername]
805
806      @unfilteredmethod
807      def revbranchcache(self):
808          if not self._revbranchcache:
809              self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
810          return self._revbranchcache
811
812      def branchtip(self, branch, ignoremissing=False):
813          '''return the tip node for a given branch
814
815          If ignoremissing is True, then this method will not raise an error.
816          This is helpful for callers that only expect None for a missing branch
817          (e.g. namespace).
818
819          '''
820          try:
821              return self.branchmap().branchtip(branch)
822          except KeyError:
823              if not ignoremissing:
824                  raise error.RepoLookupError(_("unknown branch '%s'") % branch)
825              else:
826                  pass
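
Note the fall-through: with ignoremissing=True the KeyError is swallowed and the method returns None implicitly. Callers therefore pick between exception and sentinel styles:

    node = repo.branchtip('default')                        # RepoLookupError if missing
    node = repo.branchtip('default', ignoremissing=True)    # None if missing
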
827
828      def lookup(self, key):
829          return self[key].node()
830
831      def lookupbranch(self, key, remote=None):
832          repo = remote or self
833          if key in repo.branchmap():
834              return key
835
836          repo = (remote and remote.local()) and remote or self
837          return repo[key].branch()
838
839      def known(self, nodes):
840          cl = self.changelog
841          nm = cl.nodemap
842          filtered = cl.filteredrevs
843          result = []
844          for n in nodes:
845              r = nm.get(n)
846              resp = not (r is None or r in filtered)
847              result.append(resp)
848          return result
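
known() backs the wire-protocol capability of the same name (listed in moderncaps at the top of the file): for each candidate node it reports whether this repo has it as a visible revision, deliberately answering False for filtered ones. Sketch with hypothetical node ids:

    # n1, n2 are 20-byte binary node ids received from a peer
    flags = repo.known([n1, n2])     # e.g. [True, False]
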
849
850      def local(self):
851          return self
852
853      def publishing(self):
854          # it's safe (and desirable) to trust the publish flag unconditionally
855          # so that we don't finalize changes shared between users via ssh or nfs
856          return self.ui.configbool('phases', 'publish', True, untrusted=True)
857
858      def cancopy(self):
859          # so statichttprepo's override of local() works
860          if not self.local():
861              return False
862          if not self.publishing():
863              return True
864          # if publishing we can't copy if there is filtered content
865          return not self.filtered('visible').changelog.filteredrevs
866
867      def shared(self):
868          '''the type of shared repository (None if not shared)'''
869          if self.sharedpath != self.path:
870              return 'store'
871          return None
872
873      def join(self, f, *insidef):
874          return self.vfs.join(os.path.join(f, *insidef))
875
876      def wjoin(self, f, *insidef):
877          return self.vfs.reljoin(self.root, f, *insidef)
878
879      def file(self, f):
880          if f[0] == '/':
881              f = f[1:]
882          return filelog.filelog(self.svfs, f)
883
884  -     def parents(self, changeid=None):
885  -         '''get list of changectxs for parents of changeid'''
886  -         msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
887  -         self.ui.deprecwarn(msg, '3.7')
888  -         return self[changeid].parents()
889  -
890 def changectx(self, changeid):
884 def changectx(self, changeid):
891 return self[changeid]
885 return self[changeid]
892
886
    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

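    # Example (illustrative sketch): looking up a file context at a given
    # changeset, per the docstring above; 'foo.txt' is a hypothetical
    # tracked file:
    #
    #     fctx = repo.filectx('foo.txt', changeid='tip')
    #     data = fctx.data()           # file contents at that revision
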
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

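    # Example (illustrative sketch): wread() applies the [encode] filters and
    # wwrite() applies the [decode] filters, so an hgrc pair like the gzip
    # example from the hgrc documentation flows through these two methods:
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #     [decode]
    #     *.gz = pipe: gzip
    #
    #     data = repo.wread('a.gz')              # store-side (decompressed)
    #     repo.wwrite('a.gz', data, flags='')    # working-dir side
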
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write(None)

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # prevent in-memory changes from being written out at
                # the end of outer wlock scope or so
                repo.dirstate.invalidate()

                # discard all changes (including ones already written
                # out) in this transaction
                repo.vfs.rename('journal.dirstate', 'dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

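    # Example (illustrative sketch): the usual calling pattern for
    # transaction(), with the store lock held and release() in a finally
    # block so that an exception rolls the transaction back:
    #
    #     with repo.lock():
    #         tr = repo.transaction('my-operation')
    #         try:
    #             ...mutate the store...
    #             tr.close()    # commit the transaction
    #         finally:
    #             tr.release()  # rolls back unless close() was reached
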
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

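    # Example (illustrative sketch): undofiles() maps every journal file to
    # its post-transaction undo counterpart via undoname(), e.g.
    #
    #     'journal.dirstate'   -> 'undo.dirstate'
    #     'journal.phaseroots' -> 'undo.phaseroots'
    #
    # which is what the rollback machinery below consumes.
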
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

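    # Example (illustrative sketch): these methods back 'hg recover' and
    # 'hg rollback'; a dry run reports what would be rolled back without
    # touching the store:
    #
    #     repo.rollback(dryrun=True)   # prints the target, returns 0
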
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly reread the dirstate (i.e. restore it to a previously
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

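    # Example (illustrative sketch): the acquisition order both docstrings
    # above require -- wlock before lock -- as used by write operations:
    #
    #     with repo.wlock():       # non-store parts of .hg first
    #         with repo.lock():    # then .hg/store
    #             ...modify dirstate and store...
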
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

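    # Example (illustrative sketch): for a rename recorded by _filecommit(),
    # the new filelog revision carries the copy source in 'meta':
    #
    #     meta = {'copy': 'foo', 'copyrev': hex(crev)}
    #
    # with fparent1 set to nullid so readers know to look up the copy data.
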
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may already have been stripped by the
            # time the hook is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

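    # Example (illustrative sketch): creating a commit from the working
    # directory through this API:
    #
    #     node = repo.commit(text='fix frobnication', user='alice')
    #     if node is None:
    #         pass  # nothing to commit (and allowemptycommit not set)
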
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent changeset.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

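    # A hedged sketch of the expected calling sequence (the names
    # 'strippoint' and '_removenodes' are hypothetical, not part of this
    # module): a history-destroying operation brackets its work with the
    # two methods above while holding the repo lock throughout.
    #
    #     lock = repo.lock()
    #     try:
    #         repo.destroying()                # flush pending in-memory state
    #         _removenodes(repo, strippoint)   # hypothetical destruction step
    #         repo.destroyed()                 # fix up phase/branch/tag caches
    #     finally:
    #         lock.release()
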
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

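    # Minimal usage sketch for the two convenience wrappers above, assuming
    # 'repo' is an existing localrepository instance:
    #
    #     st = repo.status()             # working directory vs. '.'
    #     for f in st.modified:
    #         repo.ui.write('M %s\n' % f)
    #     for f in repo.walk(matchmod.always(repo.root, '')):
    #         repo.ui.write('%s\n' % f)  # every tracked file in the wdir
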
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

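    # Usage sketch, assuming 'repo' is an existing localrepository: print
    # the open heads of the current branch, newest first.
    #
    #     for h in repo.branchheads():
    #         repo.ui.write('%s\n' % short(h))
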
    def branches(self, nodes):
        # for each starting node, follow first parents until a merge or a
        # root changeset is reached, then report a (start, end, end-p1,
        # end-p2) tuple describing that linear chain
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        # for each (top, bottom) pair, collect nodes on the first-parent
        # chain from top towards bottom at exponentially spaced distances
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # sample nodes at distances 1, 2, 4, 8, ... from top
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

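    # Worked example for the sampling loop above, assuming a purely linear
    # history: for a (top, bottom) pair ten first-parent steps apart, the
    # list appended to 'r' holds the nodes at distances 1, 2, 4 and 8 from
    # top (top and bottom themselves are excluded). The exponential spacing
    # lets discovery narrow a long range in O(log n) wire round trips.
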
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of functions that are called with a
        pushop (carrying repo, remote and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

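    # Registration sketch for an extension (the source name 'myext' and the
    # validation logic are hypothetical):
    #
    #     def _checkoutgoing(pushop):
    #         for node in pushop.outgoing.missing:
    #             pass  # e.g. reject changesets that violate a local policy
    #
    #     def reposetup(ui, repo):
    #         repo.prepushoutgoinghooks.add('myext', _checkoutgoing)
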
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

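    # Usage sketch for the pushkey machinery above, assuming 'repo' is an
    # existing localrepository and 'newnode' a hypothetical binary node;
    # 'bookmarks' is one of the standard pushkey namespaces:
    #
    #     keys = repo.listkeys('bookmarks')          # {name: hex node}
    #     ok = repo.pushkey('bookmarks', 'stable',
    #                       keys.get('stable', ''), hex(newnode))
    #
    # pushkey returns a true value on success and runs the prepushkey and
    # pushkey hooks seen above around the update.
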
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

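# A hedged sketch of how aftertrans() is meant to be wired up: the returned
# closure is handed to a transaction as its post-close callback, so journal
# files are renamed to undo files without the callback holding a reference
# back to the repo ('vfs' here stands for any vfs instance, and the argument
# list mirrors how this module's transaction() method uses it):
#
#     renames = [(vfs, 'journal', 'undo')]
#     tr = transaction.transaction(ui.warn, vfs, {'plain': vfs},
#                                  'journal', 'undo', aftertrans(renames))
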
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
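
# Wrapping sketch for an extension adding a custom requirement (the
# requirement name 'myext-store' and the config knob are hypothetical):
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, repo):
#         reqs = orig(repo)
#         if repo.ui.configbool('myext', 'enabled'):
#             reqs.add('myext-store')
#         return reqs
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _newreporequirements)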