localrepo: jettison now-unused dirlog() method from localrepo
Augie Fackler
r29709:b9ee2a1c default
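The change itself is tiny: dirlog() on localrepository was a one-line delegation to the manifest, and with its last in-tree caller gone it is removed outright. Any remaining out-of-tree caller (hypothetical, given the "now-unused" in the summary) can call through the manifest directly; the sketch below is exactly what the deleted method did internally:

    # Hypothetical extension code: 'repo' is a localrepository and
    # 'foo/' is a directory path in a treemanifest repository.
    # Before this change:  dl = repo.dirlog('foo/')
    # After it, reach the directory revlog via the manifest property:
    dl = repo.manifest.dirlog('foo/')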
@@ -1,1976 +1,1973 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    context,
    dirstate,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    util,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

-    def dirlog(self, dir):
-        return self.manifest.dirlog(dir)
-
510 @repofilecache('dirstate')
507 @repofilecache('dirstate')
511 def dirstate(self):
508 def dirstate(self):
512 return dirstate.dirstate(self.vfs, self.ui, self.root,
509 return dirstate.dirstate(self.vfs, self.ui, self.root,
513 self._dirstatevalidate)
510 self._dirstatevalidate)
514
511
515 def _dirstatevalidate(self, node):
512 def _dirstatevalidate(self, node):
516 try:
513 try:
517 self.changelog.rev(node)
514 self.changelog.rev(node)
518 return node
515 return node
519 except error.LookupError:
516 except error.LookupError:
520 if not self._dirstatevalidatewarned:
517 if not self._dirstatevalidatewarned:
521 self._dirstatevalidatewarned = True
518 self._dirstatevalidatewarned = True
522 self.ui.warn(_("warning: ignoring unknown"
519 self.ui.warn(_("warning: ignoring unknown"
523 " working parent %s!\n") % short(node))
520 " working parent %s!\n") % short(node))
524 return nullid
521 return nullid
525
522
526 def __getitem__(self, changeid):
523 def __getitem__(self, changeid):
527 if changeid is None or changeid == wdirrev:
524 if changeid is None or changeid == wdirrev:
528 return context.workingctx(self)
525 return context.workingctx(self)
529 if isinstance(changeid, slice):
526 if isinstance(changeid, slice):
530 return [context.changectx(self, i)
527 return [context.changectx(self, i)
531 for i in xrange(*changeid.indices(len(self)))
528 for i in xrange(*changeid.indices(len(self)))
532 if i not in self.changelog.filteredrevs]
529 if i not in self.changelog.filteredrevs]
533 return context.changectx(self, changeid)
530 return context.changectx(self, changeid)
534
531
535 def __contains__(self, changeid):
532 def __contains__(self, changeid):
536 try:
533 try:
537 self[changeid]
534 self[changeid]
538 return True
535 return True
539 except error.RepoLookupError:
536 except error.RepoLookupError:
540 return False
537 return False
541
538
542 def __nonzero__(self):
539 def __nonzero__(self):
543 return True
540 return True
544
541
545 def __len__(self):
542 def __len__(self):
546 return len(self.changelog)
543 return len(self.changelog)
547
544
548 def __iter__(self):
545 def __iter__(self):
549 return iter(self.changelog)
546 return iter(self.changelog)
550
547
551 def revs(self, expr, *args):
548 def revs(self, expr, *args):
552 '''Find revisions matching a revset.
549 '''Find revisions matching a revset.
553
550
554 The revset is specified as a string ``expr`` that may contain
551 The revset is specified as a string ``expr`` that may contain
555 %-formatting to escape certain types. See ``revset.formatspec``.
552 %-formatting to escape certain types. See ``revset.formatspec``.
556
553
557 Revset aliases from the configuration are not expanded. To expand
554 Revset aliases from the configuration are not expanded. To expand
558 user aliases, consider calling ``scmutil.revrange()``.
555 user aliases, consider calling ``scmutil.revrange()``.
559
556
560 Returns a revset.abstractsmartset, which is a list-like interface
557 Returns a revset.abstractsmartset, which is a list-like interface
561 that contains integer revisions.
558 that contains integer revisions.
562 '''
559 '''
563 expr = revset.formatspec(expr, *args)
560 expr = revset.formatspec(expr, *args)
564 m = revset.match(None, expr)
561 m = revset.match(None, expr)
565 return m(self)
562 return m(self)
566
563
567 def set(self, expr, *args):
564 def set(self, expr, *args):
568 '''Find revisions matching a revset and emit changectx instances.
565 '''Find revisions matching a revset and emit changectx instances.
569
566
570 This is a convenience wrapper around ``revs()`` that iterates the
567 This is a convenience wrapper around ``revs()`` that iterates the
571 result and is a generator of changectx instances.
568 result and is a generator of changectx instances.
572
569
573 Revset aliases from the configuration are not expanded. To expand
570 Revset aliases from the configuration are not expanded. To expand
574 user aliases, consider calling ``scmutil.revrange()``.
571 user aliases, consider calling ``scmutil.revrange()``.
575 '''
572 '''
576 for r in self.revs(expr, *args):
573 for r in self.revs(expr, *args):
577 yield self[r]
574 yield self[r]
578
575
579 def url(self):
576 def url(self):
580 return 'file:' + self.root
577 return 'file:' + self.root
581
578
582 def hook(self, name, throw=False, **args):
579 def hook(self, name, throw=False, **args):
583 """Call a hook, passing this repo instance.
580 """Call a hook, passing this repo instance.
584
581
585 This a convenience method to aid invoking hooks. Extensions likely
582 This a convenience method to aid invoking hooks. Extensions likely
586 won't call this unless they have registered a custom hook or are
583 won't call this unless they have registered a custom hook or are
587 replacing code that is expected to call a hook.
584 replacing code that is expected to call a hook.
588 """
585 """
589 return hook.hook(self.ui, self, name, throw, **args)
586 return hook.hook(self.ui, self, name, throw, **args)
590
587
591 @unfilteredmethod
588 @unfilteredmethod
592 def _tag(self, names, node, message, local, user, date, extra=None,
589 def _tag(self, names, node, message, local, user, date, extra=None,
593 editor=False):
590 editor=False):
594 if isinstance(names, str):
591 if isinstance(names, str):
595 names = (names,)
592 names = (names,)
596
593
597 branches = self.branchmap()
594 branches = self.branchmap()
598 for name in names:
595 for name in names:
599 self.hook('pretag', throw=True, node=hex(node), tag=name,
596 self.hook('pretag', throw=True, node=hex(node), tag=name,
600 local=local)
597 local=local)
601 if name in branches:
598 if name in branches:
602 self.ui.warn(_("warning: tag %s conflicts with existing"
599 self.ui.warn(_("warning: tag %s conflicts with existing"
603 " branch name\n") % name)
600 " branch name\n") % name)
604
601
605 def writetags(fp, names, munge, prevtags):
602 def writetags(fp, names, munge, prevtags):
606 fp.seek(0, 2)
603 fp.seek(0, 2)
607 if prevtags and prevtags[-1] != '\n':
604 if prevtags and prevtags[-1] != '\n':
608 fp.write('\n')
605 fp.write('\n')
609 for name in names:
606 for name in names:
610 if munge:
607 if munge:
611 m = munge(name)
608 m = munge(name)
612 else:
609 else:
613 m = name
610 m = name
614
611
615 if (self._tagscache.tagtypes and
612 if (self._tagscache.tagtypes and
616 name in self._tagscache.tagtypes):
613 name in self._tagscache.tagtypes):
617 old = self.tags().get(name, nullid)
614 old = self.tags().get(name, nullid)
618 fp.write('%s %s\n' % (hex(old), m))
615 fp.write('%s %s\n' % (hex(old), m))
619 fp.write('%s %s\n' % (hex(node), m))
616 fp.write('%s %s\n' % (hex(node), m))
620 fp.close()
617 fp.close()
621
618
622 prevtags = ''
619 prevtags = ''
623 if local:
620 if local:
624 try:
621 try:
625 fp = self.vfs('localtags', 'r+')
622 fp = self.vfs('localtags', 'r+')
626 except IOError:
623 except IOError:
627 fp = self.vfs('localtags', 'a')
624 fp = self.vfs('localtags', 'a')
628 else:
625 else:
629 prevtags = fp.read()
626 prevtags = fp.read()
630
627
631 # local tags are stored in the current charset
628 # local tags are stored in the current charset
632 writetags(fp, names, None, prevtags)
629 writetags(fp, names, None, prevtags)
633 for name in names:
630 for name in names:
634 self.hook('tag', node=hex(node), tag=name, local=local)
631 self.hook('tag', node=hex(node), tag=name, local=local)
635 return
632 return
636
633
637 try:
634 try:
638 fp = self.wfile('.hgtags', 'rb+')
635 fp = self.wfile('.hgtags', 'rb+')
639 except IOError as e:
636 except IOError as e:
640 if e.errno != errno.ENOENT:
637 if e.errno != errno.ENOENT:
641 raise
638 raise
642 fp = self.wfile('.hgtags', 'ab')
639 fp = self.wfile('.hgtags', 'ab')
643 else:
640 else:
644 prevtags = fp.read()
641 prevtags = fp.read()
645
642
646 # committed tags are stored in UTF-8
643 # committed tags are stored in UTF-8
647 writetags(fp, names, encoding.fromlocal, prevtags)
644 writetags(fp, names, encoding.fromlocal, prevtags)
648
645
649 fp.close()
646 fp.close()
650
647
651 self.invalidatecaches()
648 self.invalidatecaches()
652
649
653 if '.hgtags' not in self.dirstate:
650 if '.hgtags' not in self.dirstate:
654 self[None].add(['.hgtags'])
651 self[None].add(['.hgtags'])
655
652
656 m = matchmod.exact(self.root, '', ['.hgtags'])
653 m = matchmod.exact(self.root, '', ['.hgtags'])
657 tagnode = self.commit(message, user, date, extra=extra, match=m,
654 tagnode = self.commit(message, user, date, extra=extra, match=m,
658 editor=editor)
655 editor=editor)
659
656
660 for name in names:
657 for name in names:
661 self.hook('tag', node=hex(node), tag=name, local=local)
658 self.hook('tag', node=hex(node), tag=name, local=local)
662
659
663 return tagnode
660 return tagnode
664
661
665 def tag(self, names, node, message, local, user, date, editor=False):
662 def tag(self, names, node, message, local, user, date, editor=False):
666 '''tag a revision with one or more symbolic names.
663 '''tag a revision with one or more symbolic names.
667
664
668 names is a list of strings or, when adding a single tag, names may be a
665 names is a list of strings or, when adding a single tag, names may be a
669 string.
666 string.
670
667
671 if local is True, the tags are stored in a per-repository file.
668 if local is True, the tags are stored in a per-repository file.
672 otherwise, they are stored in the .hgtags file, and a new
669 otherwise, they are stored in the .hgtags file, and a new
673 changeset is committed with the change.
670 changeset is committed with the change.
674
671
675 keyword arguments:
672 keyword arguments:
676
673
677 local: whether to store tags in non-version-controlled file
674 local: whether to store tags in non-version-controlled file
678 (default False)
675 (default False)
679
676
680 message: commit message to use if committing
677 message: commit message to use if committing
681
678
682 user: name of user to use if committing
679 user: name of user to use if committing
683
680
684 date: date tuple to use if committing'''
681 date: date tuple to use if committing'''
685
682
686 if not local:
683 if not local:
687 m = matchmod.exact(self.root, '', ['.hgtags'])
684 m = matchmod.exact(self.root, '', ['.hgtags'])
688 if any(self.status(match=m, unknown=True, ignored=True)):
685 if any(self.status(match=m, unknown=True, ignored=True)):
689 raise error.Abort(_('working copy of .hgtags is changed'),
686 raise error.Abort(_('working copy of .hgtags is changed'),
690 hint=_('please commit .hgtags manually'))
687 hint=_('please commit .hgtags manually'))
691
688
692 self.tags() # instantiate the cache
689 self.tags() # instantiate the cache
693 self._tag(names, node, message, local, user, date, editor=editor)
690 self._tag(names, node, message, local, user, date, editor=editor)
694
691
695 @filteredpropertycache
692 @filteredpropertycache
696 def _tagscache(self):
693 def _tagscache(self):
697 '''Returns a tagscache object that contains various tags related
694 '''Returns a tagscache object that contains various tags related
698 caches.'''
695 caches.'''
699
696
700 # This simplifies its cache management by having one decorated
697 # This simplifies its cache management by having one decorated
701 # function (this one) and the rest simply fetch things from it.
698 # function (this one) and the rest simply fetch things from it.
702 class tagscache(object):
699 class tagscache(object):
703 def __init__(self):
700 def __init__(self):
704 # These two define the set of tags for this repository. tags
701 # These two define the set of tags for this repository. tags
705 # maps tag name to node; tagtypes maps tag name to 'global' or
702 # maps tag name to node; tagtypes maps tag name to 'global' or
706 # 'local'. (Global tags are defined by .hgtags across all
703 # 'local'. (Global tags are defined by .hgtags across all
707 # heads, and local tags are defined in .hg/localtags.)
704 # heads, and local tags are defined in .hg/localtags.)
708 # They constitute the in-memory cache of tags.
705 # They constitute the in-memory cache of tags.
709 self.tags = self.tagtypes = None
706 self.tags = self.tagtypes = None
710
707
711 self.nodetagscache = self.tagslist = None
708 self.nodetagscache = self.tagslist = None
712
709
713 cache = tagscache()
710 cache = tagscache()
714 cache.tags, cache.tagtypes = self._findtags()
711 cache.tags, cache.tagtypes = self._findtags()
715
712
716 return cache
713 return cache
717
714
718 def tags(self):
715 def tags(self):
719 '''return a mapping of tag to node'''
716 '''return a mapping of tag to node'''
720 t = {}
717 t = {}
721 if self.changelog.filteredrevs:
718 if self.changelog.filteredrevs:
722 tags, tt = self._findtags()
719 tags, tt = self._findtags()
723 else:
720 else:
724 tags = self._tagscache.tags
721 tags = self._tagscache.tags
725 for k, v in tags.iteritems():
722 for k, v in tags.iteritems():
726 try:
723 try:
727 # ignore tags to unknown nodes
724 # ignore tags to unknown nodes
728 self.changelog.rev(v)
725 self.changelog.rev(v)
729 t[k] = v
726 t[k] = v
730 except (error.LookupError, ValueError):
727 except (error.LookupError, ValueError):
731 pass
728 pass
732 return t
729 return t
733
730
734 def _findtags(self):
731 def _findtags(self):
735 '''Do the hard work of finding tags. Return a pair of dicts
732 '''Do the hard work of finding tags. Return a pair of dicts
736 (tags, tagtypes) where tags maps tag name to node, and tagtypes
733 (tags, tagtypes) where tags maps tag name to node, and tagtypes
737 maps tag name to a string like \'global\' or \'local\'.
734 maps tag name to a string like \'global\' or \'local\'.
738 Subclasses or extensions are free to add their own tags, but
735 Subclasses or extensions are free to add their own tags, but
739 should be aware that the returned dicts will be retained for the
736 should be aware that the returned dicts will be retained for the
740 duration of the localrepo object.'''
737 duration of the localrepo object.'''
741
738
742 # XXX what tagtype should subclasses/extensions use? Currently
739 # XXX what tagtype should subclasses/extensions use? Currently
743 # mq and bookmarks add tags, but do not set the tagtype at all.
740 # mq and bookmarks add tags, but do not set the tagtype at all.
744 # Should each extension invent its own tag type? Should there
741 # Should each extension invent its own tag type? Should there
745 # be one tagtype for all such "virtual" tags? Or is the status
742 # be one tagtype for all such "virtual" tags? Or is the status
746 # quo fine?
743 # quo fine?
747
744
748 alltags = {} # map tag name to (node, hist)
745 alltags = {} # map tag name to (node, hist)
749 tagtypes = {}
746 tagtypes = {}
750
747
751 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
748 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
752 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
749 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
753
750
754 # Build the return dicts. Have to re-encode tag names because
751 # Build the return dicts. Have to re-encode tag names because
755 # the tags module always uses UTF-8 (in order not to lose info
752 # the tags module always uses UTF-8 (in order not to lose info
756 # writing to the cache), but the rest of Mercurial wants them in
753 # writing to the cache), but the rest of Mercurial wants them in
757 # local encoding.
754 # local encoding.
758 tags = {}
755 tags = {}
759 for (name, (node, hist)) in alltags.iteritems():
756 for (name, (node, hist)) in alltags.iteritems():
760 if node != nullid:
757 if node != nullid:
761 tags[encoding.tolocal(name)] = node
758 tags[encoding.tolocal(name)] = node
762 tags['tip'] = self.changelog.tip()
759 tags['tip'] = self.changelog.tip()
763 tagtypes = dict([(encoding.tolocal(name), value)
760 tagtypes = dict([(encoding.tolocal(name), value)
764 for (name, value) in tagtypes.iteritems()])
761 for (name, value) in tagtypes.iteritems()])
765 return (tags, tagtypes)
762 return (tags, tagtypes)
766
763
767 def tagtype(self, tagname):
764 def tagtype(self, tagname):
768 '''
765 '''
769 return the type of the given tag. result can be:
766 return the type of the given tag. result can be:
770
767
771 'local' : a local tag
768 'local' : a local tag
772 'global' : a global tag
769 'global' : a global tag
773 None : tag does not exist
770 None : tag does not exist
774 '''
771 '''
775
772
776 return self._tagscache.tagtypes.get(tagname)
773 return self._tagscache.tagtypes.get(tagname)
777
774
778 def tagslist(self):
775 def tagslist(self):
779 '''return a list of tags ordered by revision'''
776 '''return a list of tags ordered by revision'''
780 if not self._tagscache.tagslist:
777 if not self._tagscache.tagslist:
781 l = []
778 l = []
782 for t, n in self.tags().iteritems():
779 for t, n in self.tags().iteritems():
783 l.append((self.changelog.rev(n), t, n))
780 l.append((self.changelog.rev(n), t, n))
784 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
781 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
785
782
786 return self._tagscache.tagslist
783 return self._tagscache.tagslist
787
784
788 def nodetags(self, node):
785 def nodetags(self, node):
789 '''return the tags associated with a node'''
786 '''return the tags associated with a node'''
790 if not self._tagscache.nodetagscache:
787 if not self._tagscache.nodetagscache:
791 nodetagscache = {}
788 nodetagscache = {}
792 for t, n in self._tagscache.tags.iteritems():
789 for t, n in self._tagscache.tags.iteritems():
793 nodetagscache.setdefault(n, []).append(t)
790 nodetagscache.setdefault(n, []).append(t)
794 for tags in nodetagscache.itervalues():
791 for tags in nodetagscache.itervalues():
795 tags.sort()
792 tags.sort()
796 self._tagscache.nodetagscache = nodetagscache
793 self._tagscache.nodetagscache = nodetagscache
797 return self._tagscache.nodetagscache.get(node, [])
794 return self._tagscache.nodetagscache.get(node, [])
798
795
799 def nodebookmarks(self, node):
796 def nodebookmarks(self, node):
800 """return the list of bookmarks pointing to the specified node"""
797 """return the list of bookmarks pointing to the specified node"""
801 marks = []
798 marks = []
802 for bookmark, n in self._bookmarks.iteritems():
799 for bookmark, n in self._bookmarks.iteritems():
803 if n == node:
800 if n == node:
804 marks.append(bookmark)
801 marks.append(bookmark)
805 return sorted(marks)
802 return sorted(marks)
806
803
807 def branchmap(self):
804 def branchmap(self):
808 '''returns a dictionary {branch: [branchheads]} with branchheads
805 '''returns a dictionary {branch: [branchheads]} with branchheads
809 ordered by increasing revision number'''
806 ordered by increasing revision number'''
810 branchmap.updatecache(self)
807 branchmap.updatecache(self)
811 return self._branchcaches[self.filtername]
808 return self._branchcaches[self.filtername]
812
809
813 @unfilteredmethod
810 @unfilteredmethod
814 def revbranchcache(self):
811 def revbranchcache(self):
815 if not self._revbranchcache:
812 if not self._revbranchcache:
816 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
813 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
817 return self._revbranchcache
814 return self._revbranchcache
818
815
819 def branchtip(self, branch, ignoremissing=False):
816 def branchtip(self, branch, ignoremissing=False):
820 '''return the tip node for a given branch
817 '''return the tip node for a given branch
821
818
822 If ignoremissing is True, then this method will not raise an error.
819 If ignoremissing is True, then this method will not raise an error.
823 This is helpful for callers that only expect None for a missing branch
820 This is helpful for callers that only expect None for a missing branch
824 (e.g. namespace).
821 (e.g. namespace).
825
822
826 '''
823 '''
827 try:
824 try:
828 return self.branchmap().branchtip(branch)
825 return self.branchmap().branchtip(branch)
829 except KeyError:
826 except KeyError:
830 if not ignoremissing:
827 if not ignoremissing:
831 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
828 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
832 else:
829 else:
833 pass
830 pass
834
831
835 def lookup(self, key):
832 def lookup(self, key):
836 return self[key].node()
833 return self[key].node()
837
834
838 def lookupbranch(self, key, remote=None):
835 def lookupbranch(self, key, remote=None):
839 repo = remote or self
836 repo = remote or self
840 if key in repo.branchmap():
837 if key in repo.branchmap():
841 return key
838 return key
842
839
843 repo = (remote and remote.local()) and remote or self
840 repo = (remote and remote.local()) and remote or self
844 return repo[key].branch()
841 return repo[key].branch()
845
842
846 def known(self, nodes):
843 def known(self, nodes):
847 cl = self.changelog
844 cl = self.changelog
848 nm = cl.nodemap
845 nm = cl.nodemap
849 filtered = cl.filteredrevs
846 filtered = cl.filteredrevs
850 result = []
847 result = []
851 for n in nodes:
848 for n in nodes:
852 r = nm.get(n)
849 r = nm.get(n)
853 resp = not (r is None or r in filtered)
850 resp = not (r is None or r in filtered)
854 result.append(resp)
851 result.append(resp)
855 return result
852 return result
856
853
857 def local(self):
854 def local(self):
858 return self
855 return self
859
856
860 def publishing(self):
857 def publishing(self):
861 # it's safe (and desirable) to trust the publish flag unconditionally
858 # it's safe (and desirable) to trust the publish flag unconditionally
862 # so that we don't finalize changes shared between users via ssh or nfs
859 # so that we don't finalize changes shared between users via ssh or nfs
863 return self.ui.configbool('phases', 'publish', True, untrusted=True)
860 return self.ui.configbool('phases', 'publish', True, untrusted=True)
864
861
865 def cancopy(self):
862 def cancopy(self):
866 # so statichttprepo's override of local() works
863 # so statichttprepo's override of local() works
867 if not self.local():
864 if not self.local():
868 return False
865 return False
869 if not self.publishing():
866 if not self.publishing():
870 return True
867 return True
871 # if publishing we can't copy if there is filtered content
868 # if publishing we can't copy if there is filtered content
872 return not self.filtered('visible').changelog.filteredrevs
869 return not self.filtered('visible').changelog.filteredrevs
873
870
874 def shared(self):
871 def shared(self):
875 '''the type of shared repository (None if not shared)'''
872 '''the type of shared repository (None if not shared)'''
876 if self.sharedpath != self.path:
873 if self.sharedpath != self.path:
877 return 'store'
874 return 'store'
878 return None
875 return None
879
876
880 def join(self, f, *insidef):
877 def join(self, f, *insidef):
881 return self.vfs.join(os.path.join(f, *insidef))
878 return self.vfs.join(os.path.join(f, *insidef))
882
879
883 def wjoin(self, f, *insidef):
880 def wjoin(self, f, *insidef):
884 return self.vfs.reljoin(self.root, f, *insidef)
881 return self.vfs.reljoin(self.root, f, *insidef)
885
882
886 def file(self, f):
883 def file(self, f):
887 if f[0] == '/':
884 if f[0] == '/':
888 f = f[1:]
885 f = f[1:]
889 return filelog.filelog(self.svfs, f)
886 return filelog.filelog(self.svfs, f)
890
887
891 def changectx(self, changeid):
888 def changectx(self, changeid):
892 return self[changeid]
889 return self[changeid]
893
890
894 def setparents(self, p1, p2=nullid):
891 def setparents(self, p1, p2=nullid):
895 self.dirstate.beginparentchange()
892 self.dirstate.beginparentchange()
896 copies = self.dirstate.setparents(p1, p2)
893 copies = self.dirstate.setparents(p1, p2)
897 pctx = self[p1]
894 pctx = self[p1]
898 if copies:
895 if copies:
899 # Adjust copy records, the dirstate cannot do it, it
896 # Adjust copy records, the dirstate cannot do it, it
900 # requires access to parents manifests. Preserve them
897 # requires access to parents manifests. Preserve them
901 # only for entries added to first parent.
898 # only for entries added to first parent.
902 for f in copies:
899 for f in copies:
903 if f not in pctx and copies[f] in pctx:
900 if f not in pctx and copies[f] in pctx:
904 self.dirstate.copy(copies[f], f)
901 self.dirstate.copy(copies[f], f)
905 if p2 == nullid:
902 if p2 == nullid:
906 for f, s in sorted(self.dirstate.copies().items()):
903 for f, s in sorted(self.dirstate.copies().items()):
907 if f not in pctx and s not in pctx:
904 if f not in pctx and s not in pctx:
908 self.dirstate.copy(None, f)
905 self.dirstate.copy(None, f)
909 self.dirstate.endparentchange()
906 self.dirstate.endparentchange()
910
907
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

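    # Illustrative configuration (a sketch of the hgrc sections this reads,
    # not from the original module): _loadfilter('encode') walks entries such
    # as
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = pipe: gzip
    #
    # mapping a file pattern either to a shell command or, when the command
    # starts with a name registered via adddatafilter(), to that Python
    # filter function; _filter() below applies the first matching entry.
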
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

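    # Added note: ``flags`` passed to wwrite() is a manifest flag string, in
    # which 'l' marks a symlink (``data`` is then the link target) and 'x'
    # marks an executable file; a hypothetical call would look like
    #
    #     repo.wwrite('bin/run.sh', data, 'x')
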
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise RuntimeError('programming error: transaction requires '
                                   'locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

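    # Usage sketch (illustrative, not from the original module): callers are
    # expected to hold the store lock before opening a transaction, matching
    # the RuntimeError guard at the top of transaction():
    #
    #     with repo.lock():
    #         tr = repo.transaction('my-operation')
    #         try:
    #             # ... write to the store ...
    #             tr.close()    # runs pretxnclose, finalizers, txnclose
    #         finally:
    #             tr.release()  # aborts (txnabort hook) if not closed
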
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

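    # Added note: every journal.* file written by _writejournal() has an
    # undo.* counterpart produced by the aftertrans() renames wired up in
    # transaction(); undofiles() above enumerates them, and rollback() below
    # consumes them.
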
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

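    # Added note: the 'undo.desc' file parsed at the top of _rollback() is
    # the renamed 'journal.desc' written by _writejournal() as
    # "<old repo length>\n<description>\n", which is how oldtip is recovered
    # above.
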
    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a
        previously known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

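    # Ordering sketch (illustrative, not from the original module): when both
    # locks are needed, take wlock before lock, mirroring the develwarn check
    # in wlock() above:
    #
    #     with repo.wlock(), repo.lock():
    #         # safe to touch both the working copy and the store
    #         ...
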
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

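    # Added note: the ``meta`` dict assembled in _filecommit() becomes
    # filelog metadata; its 'copy'/'copyrev' keys are what filectx.renamed()
    # reads back later when following renames across revisions.
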
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may have been stripped before the hook
            # is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

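    # Usage sketch (illustrative values, not from the original module):
    #
    #     match = matchmod.match(repo.root, '', ['src/foo.c'])
    #     node = repo.commit(text='fix foo', user='me <me@example.com>',
    #                        match=match)
    #
    # commit() returns the new changeset node, or None when there is nothing
    # to commit and ui.allowemptycommit is not set.
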
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changeset: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

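    # Added note: commit() gathers its revision data from the working
    # directory and delegates to commitctx() above, which only needs a
    # context object; extensions can therefore create changesets without
    # touching the working copy (e.g. via an in-memory context such as
    # context.memctx).
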
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server
        # processes. Thanks to branchcache collaboration this is done from
        # the nearest filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

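    # Illustrative usage (not part of the original source): walk the
    # working directory for Python files; matchmod.match(root, cwd,
    # patterns) builds the matcher.
    #
    #     m = matchmod.match(repo.root, '', ['glob:**.py'])
    #     for f in repo.walk(m):
    #         repo.ui.write(f + '\n')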
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

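    # Illustrative usage (not part of the original source): with the
    # defaults this compares the working directory against its first
    # parent, i.e. reports uncommitted changes.
    #
    #     st = repo.status(unknown=True)
    #     repo.ui.write('%d modified, %d unknown\n'
    #                   % (len(st.modified), len(st.unknown)))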
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

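    # Illustrative usage (not part of the original source): list the open
    # heads of the 'default' branch, newest first, as short hashes.
    #
    #     for h in repo.branchheads('default'):
    #         repo.ui.write(short(h) + '\n')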
    def branches(self, nodes):
        # for each starting node, follow first parents until a merge or a
        # root is reached, and report a (start, end-of-run, p1, p2) tuple
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        # for each (top, bottom) pair, collect nodes along the
        # first-parent chain from top towards bottom at exponentially
        # growing distances (1, 2, 4, 8, ... steps below top)
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

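    # Illustrative example (not part of the original source): on a linear
    # history with revisions 0..99, between([(node99, node0)]) samples the
    # nodes 1, 2, 4, 8, 16, 32 and 64 steps below the top (revisions 98,
    # 97, 95, 91, 83, 67 and 35), which lets the legacy discovery protocol
    # narrow down a common ancestor with O(log n) probes.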
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of functions that are called with
        a pushop (carrying repo, remote, and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

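    # Illustrative sketch (not part of the original source): an extension
    # could register a function that vetoes oversized pushes; 'myext' and
    # checkoutgoing are hypothetical names.
    #
    #     def checkoutgoing(pushop):
    #         if len(pushop.outgoing.missing) > 100:
    #             raise error.Abort('refusing to push >100 changesets')
    #     repo.prepushoutgoinghooks.add('myext', checkoutgoing)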
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

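    # Illustrative hgrc hook (not part of the original source): a shell
    # prepushkey hook sees the arguments above as HG_NAMESPACE, HG_KEY,
    # HG_OLD and HG_NEW, and a nonzero exit aborts the push.
    #
    #     [hooks]
    #     prepushkey = test "$HG_NAMESPACE" = bookmarks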
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

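    # Illustrative usage (not part of the original source): the
    # 'bookmarks' namespace maps bookmark names to hex changeset ids.
    #
    #     for name, hexnode in sorted(repo.listkeys('bookmarks').items()):
    #         repo.ui.write('%s %s\n' % (name, hexnode))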
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

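# Illustrative note (not part of the original source): the transaction
# machinery passes (vfs, src, dest) triples such as
# (repo.svfs, 'journal', 'undo'); returning a plain closure that captures
# only renamefiles keeps the repo itself out of any reference cycle, so
# its destructor can still run.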
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

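# Illustrative example (not part of the original source):
# undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'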
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
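# Illustrative sketch (not part of the original source): an extension might
# wrap this function to add its own requirement; 'myext-requirement' and
# wrappedreqs are hypothetical names.
#
#     from mercurial import extensions, localrepo
#
#     def wrappedreqs(orig, repo):
#         reqs = orig(repo)
#         reqs.add('myext-requirement')
#         return reqs
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 wrappedreqs)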